2024-11-22 15:21:44,888 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9
2024-11-22 15:21:44,914 main DEBUG Took 0.023407 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-11-22 15:21:44,915 main DEBUG PluginManager 'Core' found 129 plugins
2024-11-22 15:21:44,916 main DEBUG PluginManager 'Level' found 0 plugins
2024-11-22 15:21:44,917 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-11-22 15:21:44,919 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-22 15:21:44,930 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-11-22 15:21:44,957 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-22 15:21:44,959 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-22 15:21:44,960 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-22 15:21:44,960 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-22 15:21:44,961 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-22 15:21:44,961 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-22 15:21:44,965 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-22 15:21:44,966 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-22 15:21:44,968 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-22 15:21:44,968 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-22 15:21:44,970 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-22 15:21:44,970 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-22 15:21:44,971 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-22 15:21:44,971 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-22 15:21:44,973 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-22 15:21:44,973 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-22 15:21:44,974 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-22 15:21:44,975 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-22 15:21:44,976 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-22 15:21:44,976 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-22 15:21:44,977 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-22 15:21:44,978 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-22 15:21:44,979 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-22 15:21:44,979 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-22 15:21:44,980 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-22 15:21:44,981 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-11-22 15:21:44,984 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-22 15:21:44,986 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-11-22 15:21:44,989 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-11-22 15:21:44,990 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-11-22 15:21:44,992 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-11-22 15:21:44,993 main DEBUG PluginManager 'Converter' found 47 plugins
2024-11-22 15:21:45,005 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-11-22 15:21:45,008 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-11-22 15:21:45,011 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-11-22 15:21:45,012 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-11-22 15:21:45,012 main DEBUG createAppenders(={Console})
2024-11-22 15:21:45,013 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized
2024-11-22 15:21:45,014 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9
2024-11-22 15:21:45,014 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK.
2024-11-22 15:21:45,015 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-11-22 15:21:45,015 main DEBUG OutputStream closed
2024-11-22 15:21:45,016 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-11-22 15:21:45,016 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-11-22 15:21:45,017 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK
2024-11-22 15:21:45,243 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-11-22 15:21:45,252 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-11-22 15:21:45,254 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-11-22 15:21:45,256 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-11-22 15:21:45,259 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-11-22 15:21:45,260 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-11-22 15:21:45,260 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-11-22 15:21:45,261 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-11-22 15:21:45,262 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-11-22 15:21:45,262 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-11-22 15:21:45,263 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-11-22 15:21:45,263 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-11-22 15:21:45,264 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-11-22 15:21:45,264 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-11-22 15:21:45,265 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-11-22 15:21:45,265 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-11-22 15:21:45,266 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-11-22 15:21:45,267 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-11-22 15:21:45,290 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-22 15:21:45,292 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null
2024-11-22 15:21:45,293 main DEBUG Shutdown hook enabled. Registering a new one.
2024-11-22 15:21:45,296 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK.
2024-11-22T15:21:45,898 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/287c6e19-5b45-afa6-c064-175d83a47581
2024-11-22 15:21:45,902 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-11-22 15:21:45,903 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-22T15:21:45,944 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithAdaptivePolicy timeout: 13 mins
2024-11-22T15:21:46,015 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-22T15:21:46,032 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/287c6e19-5b45-afa6-c064-175d83a47581/cluster_7c33ed0c-a730-aa33-6163-a3da973a22a2, deleteOnExit=true
2024-11-22T15:21:46,034 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS
2024-11-22T15:21:46,035 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/287c6e19-5b45-afa6-c064-175d83a47581/test.cache.data in system properties and HBase conf
2024-11-22T15:21:46,038 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/287c6e19-5b45-afa6-c064-175d83a47581/hadoop.tmp.dir in system properties and HBase conf
2024-11-22T15:21:46,040 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/287c6e19-5b45-afa6-c064-175d83a47581/hadoop.log.dir in system properties and HBase conf
2024-11-22T15:21:46,042 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/287c6e19-5b45-afa6-c064-175d83a47581/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-22T15:21:46,044 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/287c6e19-5b45-afa6-c064-175d83a47581/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-11-22T15:21:46,045 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF
2024-11-22T15:21:46,246 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-11-22T15:21:46,522 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-11-22T15:21:46,538 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/287c6e19-5b45-afa6-c064-175d83a47581/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-11-22T15:21:46,553 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/287c6e19-5b45-afa6-c064-175d83a47581/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-11-22T15:21:46,554 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/287c6e19-5b45-afa6-c064-175d83a47581/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-11-22T15:21:46,554 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/287c6e19-5b45-afa6-c064-175d83a47581/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-22T15:21:46,555 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/287c6e19-5b45-afa6-c064-175d83a47581/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-11-22T15:21:46,556 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/287c6e19-5b45-afa6-c064-175d83a47581/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-11-22T15:21:46,556 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/287c6e19-5b45-afa6-c064-175d83a47581/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-22T15:21:46,557 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/287c6e19-5b45-afa6-c064-175d83a47581/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-22T15:21:46,558 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/287c6e19-5b45-afa6-c064-175d83a47581/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-11-22T15:21:46,559 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/287c6e19-5b45-afa6-c064-175d83a47581/nfs.dump.dir in system properties and HBase conf
2024-11-22T15:21:46,559 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/287c6e19-5b45-afa6-c064-175d83a47581/java.io.tmpdir in system properties and HBase conf
2024-11-22T15:21:46,560 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/287c6e19-5b45-afa6-c064-175d83a47581/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-22T15:21:46,561 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/287c6e19-5b45-afa6-c064-175d83a47581/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-11-22T15:21:46,561 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/287c6e19-5b45-afa6-c064-175d83a47581/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-11-22T15:21:48,806 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-11-22T15:21:49,012 INFO [Time-limited test {}] log.Log(170): Logging initialized @4965ms to org.eclipse.jetty.util.log.Slf4jLog
2024-11-22T15:21:49,187 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-22T15:21:49,350 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-22T15:21:49,468 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-22T15:21:49,470 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-22T15:21:49,473 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-22T15:21:49,521 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-22T15:21:49,556 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/287c6e19-5b45-afa6-c064-175d83a47581/hadoop.log.dir/,AVAILABLE}
2024-11-22T15:21:49,557 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-22T15:21:49,962 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@b03fcff{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/287c6e19-5b45-afa6-c064-175d83a47581/java.io.tmpdir/jetty-localhost-35773-hadoop-hdfs-3_4_1-tests_jar-_-any-12272891798796957676/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-22T15:21:50,001 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:35773}
2024-11-22T15:21:50,001 INFO [Time-limited test {}] server.Server(415): Started @5956ms
2024-11-22T15:21:51,146 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-22T15:21:51,168 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-22T15:21:51,200 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-22T15:21:51,201 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-22T15:21:51,201 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-22T15:21:51,211 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/287c6e19-5b45-afa6-c064-175d83a47581/hadoop.log.dir/,AVAILABLE}
2024-11-22T15:21:51,213 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-22T15:21:51,400 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1f79ec76{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/287c6e19-5b45-afa6-c064-175d83a47581/java.io.tmpdir/jetty-localhost-32931-hadoop-hdfs-3_4_1-tests_jar-_-any-2982222211401059819/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-22T15:21:51,405 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:32931}
2024-11-22T15:21:51,406 INFO [Time-limited test {}] server.Server(415): Started @7360ms
2024-11-22T15:21:51,509 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-22T15:21:52,739 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/287c6e19-5b45-afa6-c064-175d83a47581/cluster_7c33ed0c-a730-aa33-6163-a3da973a22a2/dfs/data/data1/current/BP-1976188293-172.17.0.2-1732288907727/current, will proceed with Du for space computation calculation,
2024-11-22T15:21:52,740 WARN [Thread-73 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/287c6e19-5b45-afa6-c064-175d83a47581/cluster_7c33ed0c-a730-aa33-6163-a3da973a22a2/dfs/data/data2/current/BP-1976188293-172.17.0.2-1732288907727/current, will proceed with Du for space computation calculation,
2024-11-22T15:21:52,891 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-22T15:21:52,956 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x860489754dca3c06 with lease ID 0xd6916d9f0e19e697: Processing first storage report for DS-d003c28b-66c3-42af-b20c-0c07c7f24f3c from datanode DatanodeRegistration(127.0.0.1:42059, datanodeUuid=38ee6b56-866f-4b24-8b0c-ca8404a19b80, infoPort=44369, infoSecurePort=0, ipcPort=45933, storageInfo=lv=-57;cid=testClusterID;nsid=1798722477;c=1732288907727)
2024-11-22T15:21:52,958 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x860489754dca3c06 with lease ID 0xd6916d9f0e19e697: from storage DS-d003c28b-66c3-42af-b20c-0c07c7f24f3c node DatanodeRegistration(127.0.0.1:42059, datanodeUuid=38ee6b56-866f-4b24-8b0c-ca8404a19b80, infoPort=44369, infoSecurePort=0, ipcPort=45933, storageInfo=lv=-57;cid=testClusterID;nsid=1798722477;c=1732288907727), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0
2024-11-22T15:21:52,971 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x860489754dca3c06 with lease ID 0xd6916d9f0e19e697: Processing first storage report for DS-6a603b88-4cc6-4355-ab56-04f91ae7b4a6 from datanode DatanodeRegistration(127.0.0.1:42059, datanodeUuid=38ee6b56-866f-4b24-8b0c-ca8404a19b80, infoPort=44369, infoSecurePort=0, ipcPort=45933, storageInfo=lv=-57;cid=testClusterID;nsid=1798722477;c=1732288907727)
2024-11-22T15:21:52,972 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x860489754dca3c06 with lease ID 0xd6916d9f0e19e697: from storage DS-6a603b88-4cc6-4355-ab56-04f91ae7b4a6 node DatanodeRegistration(127.0.0.1:42059, datanodeUuid=38ee6b56-866f-4b24-8b0c-ca8404a19b80, infoPort=44369, infoSecurePort=0, ipcPort=45933, storageInfo=lv=-57;cid=testClusterID;nsid=1798722477;c=1732288907727), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0
2024-11-22T15:21:52,998 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/287c6e19-5b45-afa6-c064-175d83a47581
2024-11-22T15:21:53,313 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/287c6e19-5b45-afa6-c064-175d83a47581/cluster_7c33ed0c-a730-aa33-6163-a3da973a22a2/zookeeper_0, clientPort=52970, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/287c6e19-5b45-afa6-c064-175d83a47581/cluster_7c33ed0c-a730-aa33-6163-a3da973a22a2/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/287c6e19-5b45-afa6-c064-175d83a47581/cluster_7c33ed0c-a730-aa33-6163-a3da973a22a2/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-11-22T15:21:53,345 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=52970
2024-11-22T15:21:53,377 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-22T15:21:53,384 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-22T15:21:53,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741825_1001 (size=7)
2024-11-22T15:21:54,275 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690 with version=8
2024-11-22T15:21:54,275 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/hbase-staging
2024-11-22T15:21:54,444 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-11-22T15:21:54,759 INFO [Time-limited test {}] client.ConnectionUtils(129): master/77927f992d0b:0 server-side Connection retries=45
2024-11-22T15:21:54,784 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-22T15:21:54,785 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-22T15:21:54,785 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-22T15:21:54,785 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-22T15:21:54,786 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-22T15:21:55,032 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-22T15:21:55,123 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-11-22T15:21:55,135 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-11-22T15:21:55,140 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-22T15:21:55,178 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 55312 (auto-detected)
2024-11-22T15:21:55,180 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected)
2024-11-22T15:21:55,227 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:38317
2024-11-22T15:21:55,241 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-22T15:21:55,245 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-22T15:21:55,266 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:38317 connecting to ZooKeeper ensemble=127.0.0.1:52970
2024-11-22T15:21:55,445 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:383170x0, quorum=127.0.0.1:52970, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-22T15:21:55,470 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38317-0x101646cc1b90000 connected
2024-11-22T15:21:55,644 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38317-0x101646cc1b90000, quorum=127.0.0.1:52970, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-22T15:21:55,651 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38317-0x101646cc1b90000, quorum=127.0.0.1:52970, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-22T15:21:55,657 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38317-0x101646cc1b90000, quorum=127.0.0.1:52970, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-22T15:21:55,666 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38317
2024-11-22T15:21:55,667 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38317
2024-11-22T15:21:55,667 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38317
2024-11-22T15:21:55,668 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38317
2024-11-22T15:21:55,668 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38317
2024-11-22T15:21:55,692 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690, hbase.cluster.distributed=false
2024-11-22T15:21:55,812 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/77927f992d0b:0 server-side Connection retries=45
2024-11-22T15:21:55,812 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-22T15:21:55,814 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-22T15:21:55,815 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-22T15:21:55,815 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-22T15:21:55,815 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-22T15:21:55,817 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-22T15:21:55,831 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-22T15:21:55,842 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:36033
2024-11-22T15:21:55,845 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-22T15:21:55,858 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-22T15:21:55,860 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-22T15:21:55,867 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-22T15:21:55,881 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:36033 connecting to ZooKeeper ensemble=127.0.0.1:52970
2024-11-22T15:21:55,900 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:360330x0, quorum=127.0.0.1:52970, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-22T15:21:55,903 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36033-0x101646cc1b90001, quorum=127.0.0.1:52970, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-22T15:21:55,901 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36033-0x101646cc1b90001 connected
2024-11-22T15:21:55,906 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36033-0x101646cc1b90001, quorum=127.0.0.1:52970, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-22T15:21:55,907 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36033-0x101646cc1b90001, quorum=127.0.0.1:52970, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-22T15:21:55,919 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36033
2024-11-22T15:21:55,920 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36033
2024-11-22T15:21:55,927 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36033
2024-11-22T15:21:55,943 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36033
2024-11-22T15:21:55,943 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36033
2024-11-22T15:21:55,956 INFO [master/77927f992d0b:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/77927f992d0b,38317,1732288914436
2024-11-22T15:21:55,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36033-0x101646cc1b90001, quorum=127.0.0.1:52970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-22T15:21:55,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38317-0x101646cc1b90000, quorum=127.0.0.1:52970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-22T15:21:55,978 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38317-0x101646cc1b90000, quorum=127.0.0.1:52970, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/77927f992d0b,38317,1732288914436
2024-11-22T15:21:55,981 DEBUG [M:0;77927f992d0b:38317 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;77927f992d0b:38317
2024-11-22T15:21:56,016 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36033-0x101646cc1b90001, quorum=127.0.0.1:52970, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-11-22T15:21:56,016 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38317-0x101646cc1b90000, quorum=127.0.0.1:52970, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-11-22T15:21:56,017 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36033-0x101646cc1b90001, quorum=127.0.0.1:52970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-22T15:21:56,017 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38317-0x101646cc1b90000, quorum=127.0.0.1:52970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-22T15:21:56,019 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38317-0x101646cc1b90000, quorum=127.0.0.1:52970, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-11-22T15:21:56,019 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:38317-0x101646cc1b90000, quorum=127.0.0.1:52970, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-11-22T15:21:56,020 INFO [master/77927f992d0b:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/77927f992d0b,38317,1732288914436 from backup master directory
2024-11-22T15:21:56,030 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36033-0x101646cc1b90001, quorum=127.0.0.1:52970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-22T15:21:56,030 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38317-0x101646cc1b90000, quorum=127.0.0.1:52970, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/77927f992d0b,38317,1732288914436
2024-11-22T15:21:56,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38317-0x101646cc1b90000, quorum=127.0.0.1:52970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-22T15:21:56,031 WARN [master/77927f992d0b:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-22T15:21:56,031 INFO [master/77927f992d0b:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=77927f992d0b,38317,1732288914436
2024-11-22T15:21:56,034 INFO [master/77927f992d0b:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0
2024-11-22T15:21:56,038 INFO [master/77927f992d0b:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0
2024-11-22T15:21:56,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741826_1002 (size=42)
2024-11-22T15:21:56,203 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/hbase.id with ID: b6b815c3-2cdd-4099-bbe0-a7c3b3fc1cfa
2024-11-22T15:21:56,281 INFO [master/77927f992d0b:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-22T15:21:56,333 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38317-0x101646cc1b90000, quorum=127.0.0.1:52970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-22T15:21:56,334 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36033-0x101646cc1b90001, quorum=127.0.0.1:52970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-22T15:21:56,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741827_1003 (size=196)
2024-11-22T15:21:56,384 INFO [master/77927f992d0b:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-11-22T15:21:56,388 INFO [master/77927f992d0b:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-11-22T15:21:56,418 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396
java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo)
    at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at java.lang.Class.forName0(Native Method) ~[?:?]
    at java.lang.Class.forName(Class.java:375) ~[?:?]
    at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:232) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:207) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T15:21:56,424 INFO [master/77927f992d0b:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider
2024-11-22T15:21:56,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741828_1004 (size=1189)
2024-11-22T15:21:56,905 INFO [master/77927f992d0b:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/MasterData/data/master/store
2024-11-22T15:21:56,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741829_1005 (size=34)
2024-11-22T15:21:56,989 INFO [master/77927f992d0b:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure.
2024-11-22T15:21:56,992 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-22T15:21:56,995 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-22T15:21:56,996 INFO [master/77927f992d0b:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-22T15:21:56,996 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-22T15:21:56,997 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-22T15:21:56,999 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-22T15:21:56,999 INFO [master/77927f992d0b:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-22T15:21:57,000 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-11-22T15:21:57,013 WARN [master/77927f992d0b:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/MasterData/data/master/store/.initializing
2024-11-22T15:21:57,013 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/MasterData/WALs/77927f992d0b,38317,1732288914436
2024-11-22T15:21:57,036 INFO [master/77927f992d0b:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName
2024-11-22T15:21:57,061 INFO [master/77927f992d0b:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=77927f992d0b%2C38317%2C1732288914436, suffix=, logDir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/MasterData/WALs/77927f992d0b,38317,1732288914436, archiveDir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/MasterData/oldWALs, maxLogs=10
2024-11-22T15:21:57,105 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/MasterData/WALs/77927f992d0b,38317,1732288914436/77927f992d0b%2C38317%2C1732288914436.1732288917068, exclude list is [], retry=0
2024-11-22T15:21:57,128 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42059,DS-d003c28b-66c3-42af-b20c-0c07c7f24f3c,DISK]
2024-11-22T15:21:57,132 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf.
2024-11-22T15:21:57,196 INFO [master/77927f992d0b:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/MasterData/WALs/77927f992d0b,38317,1732288914436/77927f992d0b%2C38317%2C1732288914436.1732288917068 2024-11-22T15:21:57,198 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44369:44369)] 2024-11-22T15:21:57,198 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-22T15:21:57,199 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T15:21:57,204 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T15:21:57,206 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T15:21:57,285 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T15:21:57,344 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-22T15:21:57,357 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:21:57,362 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T15:21:57,363 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T15:21:57,369 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-22T15:21:57,370 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:21:57,372 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T15:21:57,373 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T15:21:57,379 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-22T15:21:57,380 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:21:57,381 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T15:21:57,382 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T15:21:57,386 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-22T15:21:57,386 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:21:57,390 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T15:21:57,396 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T15:21:57,397 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T15:21:57,415 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-22T15:21:57,424 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T15:21:57,432 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T15:21:57,434 INFO [master/77927f992d0b:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71656375, jitterRate=0.06776319444179535}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-22T15:21:57,441 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-22T15:21:57,446 INFO [master/77927f992d0b:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-22T15:21:57,515 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61a883fc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:21:57,579 INFO [master/77927f992d0b:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
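
The FlushLargeStoresPolicy fallback above can be cross-checked against the surrounding entries: the master local region has four column families (info, proc, rs and state) and a flush size of 134217728 bytes (the flushSize reported by MasterRegionFlusherAndCompactor), so the per-family lower bound falls back to 134217728 / 4 = 33554432 bytes (32 MB), consistent with the "(32.0 M)" message and with the FlushLargeStoresPolicy{flushSizeLowerBound=33554432} printed when the region is opened.
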
2024-11-22T15:21:57,596 INFO [master/77927f992d0b:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-22T15:21:57,597 INFO [master/77927f992d0b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-22T15:21:57,601 INFO [master/77927f992d0b:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-22T15:21:57,603 INFO [master/77927f992d0b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 2 msec 2024-11-22T15:21:57,611 INFO [master/77927f992d0b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 7 msec 2024-11-22T15:21:57,612 INFO [master/77927f992d0b:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-22T15:21:57,662 INFO [master/77927f992d0b:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-22T15:21:57,690 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38317-0x101646cc1b90000, quorum=127.0.0.1:52970, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-22T15:21:57,733 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-11-22T15:21:57,736 INFO [master/77927f992d0b:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-22T15:21:57,737 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38317-0x101646cc1b90000, quorum=127.0.0.1:52970, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-22T15:21:57,746 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-11-22T15:21:57,749 INFO [master/77927f992d0b:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-22T15:21:57,765 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38317-0x101646cc1b90000, quorum=127.0.0.1:52970, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-22T15:21:57,778 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-11-22T15:21:57,781 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38317-0x101646cc1b90000, quorum=127.0.0.1:52970, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-22T15:21:57,791 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-11-22T15:21:57,806 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38317-0x101646cc1b90000, quorum=127.0.0.1:52970, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-22T15:21:57,833 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-22T15:21:57,847 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36033-0x101646cc1b90001, quorum=127.0.0.1:52970, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T15:21:57,847 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36033-0x101646cc1b90001, quorum=127.0.0.1:52970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T15:21:57,849 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38317-0x101646cc1b90000, quorum=127.0.0.1:52970, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T15:21:57,850 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38317-0x101646cc1b90000, quorum=127.0.0.1:52970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T15:21:57,851 INFO [master/77927f992d0b:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=77927f992d0b,38317,1732288914436, sessionid=0x101646cc1b90000, setting cluster-up flag (Was=false) 2024-11-22T15:21:57,888 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36033-0x101646cc1b90001, quorum=127.0.0.1:52970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T15:21:57,890 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38317-0x101646cc1b90000, quorum=127.0.0.1:52970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T15:21:57,932 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-22T15:21:57,938 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=77927f992d0b,38317,1732288914436 2024-11-22T15:21:57,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36033-0x101646cc1b90001, quorum=127.0.0.1:52970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T15:21:57,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38317-0x101646cc1b90000, quorum=127.0.0.1:52970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T15:21:57,983 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-22T15:21:57,985 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=77927f992d0b,38317,1732288914436 2024-11-22T15:21:58,082 DEBUG [RS:0;77927f992d0b:36033 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;77927f992d0b:36033 2024-11-22T15:21:58,092 INFO 
[RS:0;77927f992d0b:36033 {}] regionserver.HRegionServer(1008): ClusterId : b6b815c3-2cdd-4099-bbe0-a7c3b3fc1cfa 2024-11-22T15:21:58,095 DEBUG [RS:0;77927f992d0b:36033 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-22T15:21:58,109 DEBUG [RS:0;77927f992d0b:36033 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-22T15:21:58,109 DEBUG [RS:0;77927f992d0b:36033 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-22T15:21:58,117 DEBUG [RS:0;77927f992d0b:36033 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-22T15:21:58,118 DEBUG [RS:0;77927f992d0b:36033 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6eb4b91b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:21:58,120 DEBUG [RS:0;77927f992d0b:36033 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49141f6f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=77927f992d0b/172.17.0.2:0 2024-11-22T15:21:58,124 INFO [RS:0;77927f992d0b:36033 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-11-22T15:21:58,124 INFO [RS:0;77927f992d0b:36033 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-11-22T15:21:58,125 DEBUG [RS:0;77927f992d0b:36033 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-11-22T15:21:58,126 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-11-22T15:21:58,127 INFO [RS:0;77927f992d0b:36033 {}] regionserver.HRegionServer(3073): reportForDuty to master=77927f992d0b,38317,1732288914436 with isa=77927f992d0b/172.17.0.2:36033, startcode=1732288915809 2024-11-22T15:21:58,133 INFO [master/77927f992d0b:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-11-22T15:21:58,137 INFO [master/77927f992d0b:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
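
The StochasticLoadBalancer parameters printed above (maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000) are tunable. A sketch, assuming the standard hbase.master.balancer.stochastic.* key names, which the log line itself does not spell out:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Values below mirror the ones loaded in this log.
        conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);
        conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
        conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
        conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000L);
      }
    }
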
2024-11-22T15:21:58,143 DEBUG [RS:0;77927f992d0b:36033 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-22T15:21:58,145 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 77927f992d0b,38317,1732288914436 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-22T15:21:58,156 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/77927f992d0b:0, corePoolSize=5, maxPoolSize=5 2024-11-22T15:21:58,156 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/77927f992d0b:0, corePoolSize=5, maxPoolSize=5 2024-11-22T15:21:58,157 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/77927f992d0b:0, corePoolSize=5, maxPoolSize=5 2024-11-22T15:21:58,157 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/77927f992d0b:0, corePoolSize=5, maxPoolSize=5 2024-11-22T15:21:58,157 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/77927f992d0b:0, corePoolSize=10, maxPoolSize=10 2024-11-22T15:21:58,157 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/77927f992d0b:0, corePoolSize=1, maxPoolSize=1 2024-11-22T15:21:58,157 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/77927f992d0b:0, corePoolSize=2, maxPoolSize=2 2024-11-22T15:21:58,158 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/77927f992d0b:0, corePoolSize=1, maxPoolSize=1 2024-11-22T15:21:58,180 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-11-22T15:21:58,181 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-11-22T15:21:58,190 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:21:58,191 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-22T15:21:58,208 INFO [master/77927f992d0b:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732288948208 2024-11-22T15:21:58,209 INFO [master/77927f992d0b:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-22T15:21:58,210 INFO [master/77927f992d0b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-22T15:21:58,214 INFO [master/77927f992d0b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-22T15:21:58,215 INFO [master/77927f992d0b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-22T15:21:58,215 INFO [master/77927f992d0b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-22T15:21:58,215 INFO [master/77927f992d0b:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-22T15:21:58,227 INFO [master/77927f992d0b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
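
The hbase:meta descriptor written above pins per-family settings such as VERSIONS => '3', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1' and BLOCKSIZE => '8192 B (8KB)' for the info family. A user table with the same family-level settings could be declared through the client-side builders; a sketch using a hypothetical table name "demo":

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                    // VERSIONS => '3'
                .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
                .setInMemory(true)                                    // IN_MEMORY => 'true'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
                .setBlocksize(8192)                                   // BLOCKSIZE => 8 KB
                .build())
            .build();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.createTable(td);
        }
      }
    }
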
2024-11-22T15:21:58,230 INFO [master/77927f992d0b:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-22T15:21:58,232 INFO [master/77927f992d0b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-22T15:21:58,241 INFO [master/77927f992d0b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-22T15:21:58,244 INFO [master/77927f992d0b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-22T15:21:58,244 INFO [master/77927f992d0b:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-22T15:21:58,250 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/77927f992d0b:0:becomeActiveMaster-HFileCleaner.large.0-1732288918246,5,FailOnTimeoutGroup] 2024-11-22T15:21:58,251 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/77927f992d0b:0:becomeActiveMaster-HFileCleaner.small.0-1732288918251,5,FailOnTimeoutGroup] 2024-11-22T15:21:58,252 INFO [master/77927f992d0b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T15:21:58,255 INFO [master/77927f992d0b:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-22T15:21:58,257 INFO [master/77927f992d0b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-22T15:21:58,259 INFO [master/77927f992d0b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
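
The HMaster(1680) line above names the switch explicitly: reopening regions with a very high store-file reference count stays disabled until hbase.regions.recovery.store.file.ref.count is given a value greater than 0. A one-line sketch (the threshold of 3 is an arbitrary illustrative value):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class StoreFileRefCountSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Any value > 0 enables the feature, per the log message; 3 is only an example.
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
      }
    }
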
2024-11-22T15:21:58,263 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40567, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-22T15:21:58,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741831_1007 (size=1039) 2024-11-22T15:21:58,277 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38317 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 77927f992d0b,36033,1732288915809 2024-11-22T15:21:58,282 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-11-22T15:21:58,283 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690 2024-11-22T15:21:58,283 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38317 {}] master.ServerManager(486): Registering regionserver=77927f992d0b,36033,1732288915809 2024-11-22T15:21:58,317 DEBUG [RS:0;77927f992d0b:36033 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690 2024-11-22T15:21:58,317 DEBUG [RS:0;77927f992d0b:36033 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:34007 2024-11-22T15:21:58,317 DEBUG [RS:0;77927f992d0b:36033 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-11-22T15:21:58,332 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38317-0x101646cc1b90000, quorum=127.0.0.1:52970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T15:21:58,333 DEBUG [RS:0;77927f992d0b:36033 {}] zookeeper.ZKUtil(111): regionserver:36033-0x101646cc1b90001, quorum=127.0.0.1:52970, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/77927f992d0b,36033,1732288915809 2024-11-22T15:21:58,333 WARN [RS:0;77927f992d0b:36033 {}] 
hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T15:21:58,333 INFO [RS:0;77927f992d0b:36033 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-22T15:21:58,334 DEBUG [RS:0;77927f992d0b:36033 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/WALs/77927f992d0b,36033,1732288915809 2024-11-22T15:21:58,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741832_1008 (size=32) 2024-11-22T15:21:58,351 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T15:21:58,364 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [77927f992d0b,36033,1732288915809] 2024-11-22T15:21:58,371 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T15:21:58,375 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T15:21:58,376 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:21:58,378 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T15:21:58,379 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T15:21:58,384 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, 
single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T15:21:58,384 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:21:58,386 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T15:21:58,386 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T15:21:58,392 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T15:21:58,392 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:21:58,394 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T15:21:58,396 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/hbase/meta/1588230740 2024-11-22T15:21:58,399 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/hbase/meta/1588230740 2024-11-22T15:21:58,405 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-22T15:21:58,408 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-22T15:21:58,411 DEBUG [RS:0;77927f992d0b:36033 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-11-22T15:21:58,431 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T15:21:58,431 INFO [RS:0;77927f992d0b:36033 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-22T15:21:58,434 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61901123, jitterRate=-0.07760138809680939}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T15:21:58,440 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-22T15:21:58,440 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-22T15:21:58,440 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-22T15:21:58,440 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-22T15:21:58,440 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T15:21:58,441 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T15:21:58,454 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-22T15:21:58,454 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-22T15:21:58,458 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-11-22T15:21:58,458 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-11-22T15:21:58,470 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-22T15:21:58,471 INFO [RS:0;77927f992d0b:36033 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-22T15:21:58,485 INFO [RS:0;77927f992d0b:36033 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-22T15:21:58,485 INFO [RS:0;77927f992d0b:36033 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
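
Two of the limits reported above cross-check: the flusher's low-water mark is 95% of the global memstore limit (880 M × 0.95 = 836 M), and the PressureAwareCompactionThroughputController runs between 50 and 100 MB/second. A sketch of the corresponding keys, assuming the standard names (what is normally configured is the fraction of heap, not the absolute 880 M figure, which here reflects the test JVM's heap size):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreAndThroughputSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Fraction of heap shared by all memstores (assumption: default 0.4).
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        // Low-water mark as a fraction of the limit above: 880 M * 0.95 = 836 M, as logged.
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        // Compaction throughput bounds in bytes/second (assumed key names): 50 MB and 100 MB.
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
      }
    }
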
2024-11-22T15:21:58,486 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T15:21:58,493 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-22T15:21:58,493 INFO [RS:0;77927f992d0b:36033 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-11-22T15:21:58,510 INFO [RS:0;77927f992d0b:36033 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-22T15:21:58,511 DEBUG [RS:0;77927f992d0b:36033 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/77927f992d0b:0, corePoolSize=1, maxPoolSize=1 2024-11-22T15:21:58,511 DEBUG [RS:0;77927f992d0b:36033 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/77927f992d0b:0, corePoolSize=1, maxPoolSize=1 2024-11-22T15:21:58,512 DEBUG [RS:0;77927f992d0b:36033 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/77927f992d0b:0, corePoolSize=1, maxPoolSize=1 2024-11-22T15:21:58,512 DEBUG [RS:0;77927f992d0b:36033 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/77927f992d0b:0, corePoolSize=1, maxPoolSize=1 2024-11-22T15:21:58,512 DEBUG [RS:0;77927f992d0b:36033 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/77927f992d0b:0, corePoolSize=1, maxPoolSize=1 2024-11-22T15:21:58,513 DEBUG [RS:0;77927f992d0b:36033 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/77927f992d0b:0, corePoolSize=2, maxPoolSize=2 2024-11-22T15:21:58,513 DEBUG [RS:0;77927f992d0b:36033 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0, corePoolSize=1, maxPoolSize=1 2024-11-22T15:21:58,513 DEBUG [RS:0;77927f992d0b:36033 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/77927f992d0b:0, corePoolSize=1, maxPoolSize=1 2024-11-22T15:21:58,513 DEBUG [RS:0;77927f992d0b:36033 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/77927f992d0b:0, corePoolSize=1, maxPoolSize=1 2024-11-22T15:21:58,513 DEBUG [RS:0;77927f992d0b:36033 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/77927f992d0b:0, corePoolSize=1, maxPoolSize=1 2024-11-22T15:21:58,514 DEBUG [RS:0;77927f992d0b:36033 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/77927f992d0b:0, corePoolSize=1, maxPoolSize=1 2024-11-22T15:21:58,514 DEBUG [RS:0;77927f992d0b:36033 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/77927f992d0b:0, corePoolSize=3, maxPoolSize=3 2024-11-22T15:21:58,514 DEBUG [RS:0;77927f992d0b:36033 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0, corePoolSize=3, maxPoolSize=3 2024-11-22T15:21:58,523 INFO [RS:0;77927f992d0b:36033 {}] 
hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T15:21:58,523 INFO [RS:0;77927f992d0b:36033 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T15:21:58,524 INFO [RS:0;77927f992d0b:36033 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-22T15:21:58,524 INFO [RS:0;77927f992d0b:36033 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-22T15:21:58,524 INFO [RS:0;77927f992d0b:36033 {}] hbase.ChoreService(168): Chore ScheduledChore name=77927f992d0b,36033,1732288915809-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T15:21:58,575 INFO [RS:0;77927f992d0b:36033 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-22T15:21:58,578 INFO [RS:0;77927f992d0b:36033 {}] hbase.ChoreService(168): Chore ScheduledChore name=77927f992d0b,36033,1732288915809-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T15:21:58,648 WARN [77927f992d0b:38317 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 2024-11-22T15:21:58,655 INFO [RS:0;77927f992d0b:36033 {}] regionserver.Replication(204): 77927f992d0b,36033,1732288915809 started 2024-11-22T15:21:58,655 INFO [RS:0;77927f992d0b:36033 {}] regionserver.HRegionServer(1767): Serving as 77927f992d0b,36033,1732288915809, RpcServer on 77927f992d0b/172.17.0.2:36033, sessionid=0x101646cc1b90001 2024-11-22T15:21:58,656 DEBUG [RS:0;77927f992d0b:36033 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-22T15:21:58,657 DEBUG [RS:0;77927f992d0b:36033 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 77927f992d0b,36033,1732288915809 2024-11-22T15:21:58,657 DEBUG [RS:0;77927f992d0b:36033 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '77927f992d0b,36033,1732288915809' 2024-11-22T15:21:58,657 DEBUG [RS:0;77927f992d0b:36033 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-22T15:21:58,658 DEBUG [RS:0;77927f992d0b:36033 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-22T15:21:58,662 DEBUG [RS:0;77927f992d0b:36033 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-22T15:21:58,663 DEBUG [RS:0;77927f992d0b:36033 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-22T15:21:58,663 DEBUG [RS:0;77927f992d0b:36033 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 77927f992d0b,36033,1732288915809 2024-11-22T15:21:58,663 DEBUG [RS:0;77927f992d0b:36033 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '77927f992d0b,36033,1732288915809' 2024-11-22T15:21:58,664 DEBUG [RS:0;77927f992d0b:36033 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-22T15:21:58,666 DEBUG [RS:0;77927f992d0b:36033 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-22T15:21:58,668 DEBUG [RS:0;77927f992d0b:36033 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot 
started 2024-11-22T15:21:58,668 INFO [RS:0;77927f992d0b:36033 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-22T15:21:58,668 INFO [RS:0;77927f992d0b:36033 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-22T15:21:58,774 INFO [RS:0;77927f992d0b:36033 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-22T15:21:58,779 INFO [RS:0;77927f992d0b:36033 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=77927f992d0b%2C36033%2C1732288915809, suffix=, logDir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/WALs/77927f992d0b,36033,1732288915809, archiveDir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/oldWALs, maxLogs=32 2024-11-22T15:21:58,809 DEBUG [RS:0;77927f992d0b:36033 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/WALs/77927f992d0b,36033,1732288915809/77927f992d0b%2C36033%2C1732288915809.1732288918783, exclude list is [], retry=0 2024-11-22T15:21:58,820 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42059,DS-d003c28b-66c3-42af-b20c-0c07c7f24f3c,DISK] 2024-11-22T15:21:58,843 INFO [RS:0;77927f992d0b:36033 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/WALs/77927f992d0b,36033,1732288915809/77927f992d0b%2C36033%2C1732288915809.1732288918783 2024-11-22T15:21:58,849 DEBUG [RS:0;77927f992d0b:36033 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44369:44369)] 2024-11-22T15:21:58,900 DEBUG [77927f992d0b:38317 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-22T15:21:58,906 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=77927f992d0b,36033,1732288915809 2024-11-22T15:21:58,912 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 77927f992d0b,36033,1732288915809, state=OPENING 2024-11-22T15:21:58,957 DEBUG [PEWorker-4 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-22T15:21:58,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36033-0x101646cc1b90001, quorum=127.0.0.1:52970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T15:21:58,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38317-0x101646cc1b90000, quorum=127.0.0.1:52970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T15:21:58,967 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T15:21:58,970 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T15:21:58,970 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, 
server=77927f992d0b,36033,1732288915809}] 2024-11-22T15:21:59,171 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:21:59,174 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-22T15:21:59,193 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56500, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-22T15:21:59,232 INFO [RS_OPEN_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-11-22T15:21:59,233 INFO [RS_OPEN_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-22T15:21:59,234 INFO [RS_OPEN_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-22T15:21:59,251 INFO [RS_OPEN_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=77927f992d0b%2C36033%2C1732288915809.meta, suffix=.meta, logDir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/WALs/77927f992d0b,36033,1732288915809, archiveDir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/oldWALs, maxLogs=32 2024-11-22T15:21:59,279 DEBUG [RS_OPEN_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/WALs/77927f992d0b,36033,1732288915809/77927f992d0b%2C36033%2C1732288915809.meta.1732288919254.meta, exclude list is [], retry=0 2024-11-22T15:21:59,305 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42059,DS-d003c28b-66c3-42af-b20c-0c07c7f24f3c,DISK] 2024-11-22T15:21:59,323 INFO [RS_OPEN_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/WALs/77927f992d0b,36033,1732288915809/77927f992d0b%2C36033%2C1732288915809.meta.1732288919254.meta 2024-11-22T15:21:59,327 DEBUG [RS_OPEN_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44369:44369)] 2024-11-22T15:21:59,328 DEBUG [RS_OPEN_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-22T15:21:59,330 DEBUG [RS_OPEN_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-22T15:21:59,485 DEBUG [RS_OPEN_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-22T15:21:59,493 INFO 
[RS_OPEN_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-22T15:21:59,503 DEBUG [RS_OPEN_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-22T15:21:59,503 DEBUG [RS_OPEN_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T15:21:59,504 DEBUG [RS_OPEN_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-11-22T15:21:59,504 DEBUG [RS_OPEN_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-11-22T15:21:59,522 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T15:21:59,526 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T15:21:59,526 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:21:59,531 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T15:21:59,534 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T15:21:59,536 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, 
single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T15:21:59,537 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:21:59,541 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T15:21:59,542 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T15:21:59,544 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T15:21:59,544 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:21:59,549 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T15:21:59,552 DEBUG [RS_OPEN_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/hbase/meta/1588230740 2024-11-22T15:21:59,562 DEBUG [RS_OPEN_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/hbase/meta/1588230740 2024-11-22T15:21:59,570 DEBUG [RS_OPEN_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
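
The region being opened here is the catalog table itself; once it is online, its contents are readable through the ordinary client API. A sketch that scans the info family of hbase:meta, assuming a reachable cluster configuration on the classpath:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaScanSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
          Scan scan = new Scan().addFamily(Bytes.toBytes("info"));
          try (ResultScanner scanner = meta.getScanner(scan)) {
            for (Result row : scanner) {
              System.out.println(row); // one row per region, carrying its location
            }
          }
        }
      }
    }
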
2024-11-22T15:21:59,580 DEBUG [RS_OPEN_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-22T15:21:59,588 INFO [RS_OPEN_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64032417, jitterRate=-0.04584263265132904}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T15:21:59,592 DEBUG [RS_OPEN_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-22T15:21:59,600 INFO [RS_OPEN_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732288919164 2024-11-22T15:21:59,619 DEBUG [RS_OPEN_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-22T15:21:59,620 INFO [RS_OPEN_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-11-22T15:21:59,622 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=77927f992d0b,36033,1732288915809 2024-11-22T15:21:59,625 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 77927f992d0b,36033,1732288915809, state=OPEN 2024-11-22T15:21:59,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36033-0x101646cc1b90001, quorum=127.0.0.1:52970, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T15:21:59,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38317-0x101646cc1b90000, quorum=127.0.0.1:52970, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T15:21:59,686 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T15:21:59,686 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T15:21:59,692 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-11-22T15:21:59,693 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=77927f992d0b,36033,1732288915809 in 716 msec 2024-11-22T15:21:59,722 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-11-22T15:21:59,723 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 1.2250 sec 2024-11-22T15:21:59,732 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.6610 sec 2024-11-22T15:21:59,733 INFO [master/77927f992d0b:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732288919732, 
completionTime=-1 2024-11-22T15:21:59,733 INFO [master/77927f992d0b:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-22T15:21:59,733 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-11-22T15:21:59,789 DEBUG [hconnection-0x6a38d96a-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:21:59,792 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56510, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:21:59,807 INFO [master/77927f992d0b:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-11-22T15:21:59,807 INFO [master/77927f992d0b:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732288979807 2024-11-22T15:21:59,808 INFO [master/77927f992d0b:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732289039808 2024-11-22T15:21:59,808 INFO [master/77927f992d0b:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 74 msec 2024-11-22T15:21:59,868 INFO [master/77927f992d0b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=77927f992d0b,38317,1732288914436-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T15:21:59,868 INFO [master/77927f992d0b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=77927f992d0b,38317,1732288914436-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T15:21:59,868 INFO [master/77927f992d0b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=77927f992d0b,38317,1732288914436-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T15:21:59,871 INFO [master/77927f992d0b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-77927f992d0b:38317, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T15:21:59,875 INFO [master/77927f992d0b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-22T15:21:59,885 INFO [master/77927f992d0b:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
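The ChoreService entries above show the active master scheduling its periodic chores (ClusterStatusChore, BalancerChore, RegionNormalizerChore, CatalogJanitor, HbckChore). ScheduledChore and ChoreService are internal, Private-audience classes, so the following is only a sketch of the pattern, assuming the 2.x constructors shown here; the chore name and period are made up.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) throws Exception {
        // Minimal Stoppable; real callers pass the server instance itself.
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ScheduledChore chore = new ScheduledChore("demo-chore", stopper, 60_000) {
          @Override protected void chore() {
            System.out.println("periodic work, like the chores listed in the log above");
          }
        };
        ChoreService service = new ChoreService("demo");
        service.scheduleChore(chore);   // re-runs every 60 s until the stopper fires
        Thread.sleep(5_000);
        service.shutdown();
      }
    }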
2024-11-22T15:21:59,888 INFO [master/77927f992d0b:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-22T15:21:59,899 DEBUG [master/77927f992d0b:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-11-22T15:21:59,900 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-11-22T15:21:59,904 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T15:21:59,906 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:21:59,909 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T15:21:59,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741835_1011 (size=358) 2024-11-22T15:21:59,970 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => e7d35e0ae1b576a64b6f8105b0d3681e, NAME => 'hbase:namespace,,1732288919886.e7d35e0ae1b576a64b6f8105b0d3681e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690 2024-11-22T15:22:00,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741836_1012 (size=42) 2024-11-22T15:22:00,471 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1732288919886.e7d35e0ae1b576a64b6f8105b0d3681e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T15:22:00,471 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing e7d35e0ae1b576a64b6f8105b0d3681e, disabling compactions & flushes 2024-11-22T15:22:00,471 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1732288919886.e7d35e0ae1b576a64b6f8105b0d3681e. 2024-11-22T15:22:00,471 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1732288919886.e7d35e0ae1b576a64b6f8105b0d3681e. 
2024-11-22T15:22:00,471 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1732288919886.e7d35e0ae1b576a64b6f8105b0d3681e. after waiting 0 ms 2024-11-22T15:22:00,471 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1732288919886.e7d35e0ae1b576a64b6f8105b0d3681e. 2024-11-22T15:22:00,471 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1732288919886.e7d35e0ae1b576a64b6f8105b0d3681e. 2024-11-22T15:22:00,471 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for e7d35e0ae1b576a64b6f8105b0d3681e: 2024-11-22T15:22:00,475 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T15:22:00,482 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1732288919886.e7d35e0ae1b576a64b6f8105b0d3681e.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1732288920476"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732288920476"}]},"ts":"1732288920476"} 2024-11-22T15:22:00,520 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-22T15:22:00,523 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T15:22:00,527 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732288920524"}]},"ts":"1732288920524"} 2024-11-22T15:22:00,536 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-11-22T15:22:00,584 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=e7d35e0ae1b576a64b6f8105b0d3681e, ASSIGN}] 2024-11-22T15:22:00,589 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=e7d35e0ae1b576a64b6f8105b0d3681e, ASSIGN 2024-11-22T15:22:00,592 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=e7d35e0ae1b576a64b6f8105b0d3681e, ASSIGN; state=OFFLINE, location=77927f992d0b,36033,1732288915809; forceNewPlan=false, retain=false 2024-11-22T15:22:00,742 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=e7d35e0ae1b576a64b6f8105b0d3681e, regionState=OPENING, regionLocation=77927f992d0b,36033,1732288915809 2024-11-22T15:22:00,747 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure e7d35e0ae1b576a64b6f8105b0d3681e, server=77927f992d0b,36033,1732288915809}] 2024-11-22T15:22:00,902 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 
77927f992d0b,36033,1732288915809 2024-11-22T15:22:00,909 INFO [RS_OPEN_PRIORITY_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1732288919886.e7d35e0ae1b576a64b6f8105b0d3681e. 2024-11-22T15:22:00,909 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => e7d35e0ae1b576a64b6f8105b0d3681e, NAME => 'hbase:namespace,,1732288919886.e7d35e0ae1b576a64b6f8105b0d3681e.', STARTKEY => '', ENDKEY => ''} 2024-11-22T15:22:00,910 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace e7d35e0ae1b576a64b6f8105b0d3681e 2024-11-22T15:22:00,910 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1732288919886.e7d35e0ae1b576a64b6f8105b0d3681e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T15:22:00,910 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for e7d35e0ae1b576a64b6f8105b0d3681e 2024-11-22T15:22:00,910 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for e7d35e0ae1b576a64b6f8105b0d3681e 2024-11-22T15:22:00,915 INFO [StoreOpener-e7d35e0ae1b576a64b6f8105b0d3681e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region e7d35e0ae1b576a64b6f8105b0d3681e 2024-11-22T15:22:00,918 INFO [StoreOpener-e7d35e0ae1b576a64b6f8105b0d3681e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e7d35e0ae1b576a64b6f8105b0d3681e columnFamilyName info 2024-11-22T15:22:00,918 DEBUG [StoreOpener-e7d35e0ae1b576a64b6f8105b0d3681e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:00,919 INFO [StoreOpener-e7d35e0ae1b576a64b6f8105b0d3681e-1 {}] regionserver.HStore(327): Store=e7d35e0ae1b576a64b6f8105b0d3681e/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T15:22:00,921 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/hbase/namespace/e7d35e0ae1b576a64b6f8105b0d3681e 2024-11-22T15:22:00,922 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/hbase/namespace/e7d35e0ae1b576a64b6f8105b0d3681e 2024-11-22T15:22:00,926 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for e7d35e0ae1b576a64b6f8105b0d3681e 2024-11-22T15:22:00,931 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/hbase/namespace/e7d35e0ae1b576a64b6f8105b0d3681e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T15:22:00,932 INFO [RS_OPEN_PRIORITY_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened e7d35e0ae1b576a64b6f8105b0d3681e; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63368438, jitterRate=-0.055736690759658813}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T15:22:00,933 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for e7d35e0ae1b576a64b6f8105b0d3681e: 2024-11-22T15:22:00,936 INFO [RS_OPEN_PRIORITY_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1732288919886.e7d35e0ae1b576a64b6f8105b0d3681e., pid=6, masterSystemTime=1732288920901 2024-11-22T15:22:00,940 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1732288919886.e7d35e0ae1b576a64b6f8105b0d3681e. 2024-11-22T15:22:00,940 INFO [RS_OPEN_PRIORITY_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1732288919886.e7d35e0ae1b576a64b6f8105b0d3681e. 
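The MetaTableAccessor entries above render the hbase:meta updates as JSON-ified Puts (regioninfo plus state under one row and timestamp). Client code builds the same kind of mutation with the public Put API; a sketch against a hypothetical user table, with made-up row and values.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.*;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("SomeTable"))) {   // hypothetical table
          long ts = System.currentTimeMillis();
          Put put = new Put(Bytes.toBytes("row-1"));
          // Two columns written under one row key and one timestamp, analogous to the
          // {"qualifier":"regioninfo"...},{"qualifier":"state"...} pair rendered above.
          put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("regioninfo"), ts, Bytes.toBytes("value-a"));
          put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("state"), ts, Bytes.toBytes("value-b"));
          table.put(put);
        }
      }
    }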
2024-11-22T15:22:00,942 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=e7d35e0ae1b576a64b6f8105b0d3681e, regionState=OPEN, openSeqNum=2, regionLocation=77927f992d0b,36033,1732288915809 2024-11-22T15:22:00,953 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-11-22T15:22:00,955 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure e7d35e0ae1b576a64b6f8105b0d3681e, server=77927f992d0b,36033,1732288915809 in 201 msec 2024-11-22T15:22:00,958 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-11-22T15:22:00,958 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=e7d35e0ae1b576a64b6f8105b0d3681e, ASSIGN in 369 msec 2024-11-22T15:22:00,960 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T15:22:00,960 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732288920960"}]},"ts":"1732288920960"} 2024-11-22T15:22:00,964 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-11-22T15:22:01,001 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T15:22:01,006 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 1.1120 sec 2024-11-22T15:22:01,009 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:38317-0x101646cc1b90000, quorum=127.0.0.1:52970, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-11-22T15:22:01,016 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38317-0x101646cc1b90000, quorum=127.0.0.1:52970, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-11-22T15:22:01,016 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36033-0x101646cc1b90001, quorum=127.0.0.1:52970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T15:22:01,016 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38317-0x101646cc1b90000, quorum=127.0.0.1:52970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T15:22:01,059 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-11-22T15:22:01,091 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38317-0x101646cc1b90000, quorum=127.0.0.1:52970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-22T15:22:01,108 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 53 msec 2024-11-22T15:22:01,121 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-11-22T15:22:01,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38317-0x101646cc1b90000, quorum=127.0.0.1:52970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-22T15:22:01,169 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 48 msec 2024-11-22T15:22:01,196 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38317-0x101646cc1b90000, quorum=127.0.0.1:52970, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-11-22T15:22:01,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38317-0x101646cc1b90000, quorum=127.0.0.1:52970, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-11-22T15:22:01,216 INFO [master/77927f992d0b:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 5.184sec 2024-11-22T15:22:01,218 INFO [master/77927f992d0b:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-22T15:22:01,220 INFO [master/77927f992d0b:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-22T15:22:01,221 INFO [master/77927f992d0b:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-22T15:22:01,225 INFO [master/77927f992d0b:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-22T15:22:01,225 INFO [master/77927f992d0b:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-22T15:22:01,226 INFO [master/77927f992d0b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=77927f992d0b,38317,1732288914436-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T15:22:01,226 INFO [master/77927f992d0b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=77927f992d0b,38317,1732288914436-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-22T15:22:01,237 DEBUG [master/77927f992d0b:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-11-22T15:22:01,239 INFO [master/77927f992d0b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-22T15:22:01,239 INFO [master/77927f992d0b:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=77927f992d0b,38317,1732288914436-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
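The two CreateNamespaceProcedure runs above set up the 'default' and 'hbase' namespaces and mirror them under the /hbase/namespace znode. From a client, the equivalent operation goes through the Admin API; a sketch with an illustrative namespace name.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class NamespaceSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Drives a CreateNamespaceProcedure much like the ones logged above.
          admin.createNamespace(NamespaceDescriptor.create("demo_ns").build());
          for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println(ns.getName());   // expect: default, hbase, demo_ns
          }
        }
      }
    }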
2024-11-22T15:22:01,309 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x76523d14 to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@46873e4f 2024-11-22T15:22:01,310 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-11-22T15:22:01,344 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76ba07, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:22:01,348 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-22T15:22:01,348 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-22T15:22:01,378 DEBUG [hconnection-0x7edf53b1-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:22:01,407 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56516, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:22:01,419 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=77927f992d0b,38317,1732288914436 2024-11-22T15:22:01,451 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=219, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=415, ProcessCount=11, AvailableMemoryMB=4916 2024-11-22T15:22:01,465 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-22T15:22:01,482 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57410, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-22T15:22:01,497 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
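The WARN above flags ZKConnectionRegistry as deprecated (the linked book section recommends the RPC-based registry), and the test client bootstraps against the minicluster's ZooKeeper quorum at 127.0.0.1:52970. A minimal client bootstrap looks like the sketch below; the registry-implementation key in the comment is my assumption of the usual switch and should be checked against the linked documentation.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClientBootstrap {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.setInt("hbase.zookeeper.property.clientPort", 52970);
        // To avoid the deprecation WARN, newer clients can switch the connection registry, e.g.
        // conf.set("hbase.client.registry.impl", "...RpcConnectionRegistry");  // assumed key, see book link
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          System.out.println("connected; cluster metadata is fetched lazily on first use");
        }
      }
    }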
2024-11-22T15:22:01,501 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T15:22:01,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-22T15:22:01,507 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T15:22:01,508 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:01,510 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T15:22:01,520 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-11-22T15:22:01,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-22T15:22:01,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741837_1013 (size=963) 2024-11-22T15:22:01,577 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690 2024-11-22T15:22:01,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741838_1014 (size=53) 2024-11-22T15:22:01,613 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T15:22:01,613 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing ed6f777bba2efed5f759348895e3133f, disabling compactions & flushes 2024-11-22T15:22:01,613 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:01,614 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:01,614 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. after waiting 0 ms 2024-11-22T15:22:01,614 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:01,614 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:01,614 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:01,618 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T15:22:01,619 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732288921618"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732288921618"}]},"ts":"1732288921618"} 2024-11-22T15:22:01,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-22T15:22:01,674 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
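The create request above builds 'TestAcidGuarantees' with families A, B and C (one version each, 64 KB blocks) and the table-level attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', which is why the stores later open with memstore type=CompactingMemStore. A sketch of an equivalent descriptor built with the public client API; an in-memory compaction policy can also be set per family, noted in the comment.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.*;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAcidTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder tdb =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
              // Table-level switch used in the log; per-family alternative:
              // ColumnFamilyDescriptorBuilder#setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
              .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
          for (String cf : new String[] {"A", "B", "C"}) {
            tdb.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(cf))
                .setMaxVersions(1)        // VERSIONS => '1'
                .setBlocksize(64 * 1024)  // BLOCKSIZE => '65536'
                .build());
          }
          admin.createTable(tdb.build());
        }
      }
    }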
2024-11-22T15:22:01,677 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T15:22:01,677 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732288921677"}]},"ts":"1732288921677"} 2024-11-22T15:22:01,682 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-22T15:22:01,766 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ed6f777bba2efed5f759348895e3133f, ASSIGN}] 2024-11-22T15:22:01,769 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ed6f777bba2efed5f759348895e3133f, ASSIGN 2024-11-22T15:22:01,770 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=ed6f777bba2efed5f759348895e3133f, ASSIGN; state=OFFLINE, location=77927f992d0b,36033,1732288915809; forceNewPlan=false, retain=false 2024-11-22T15:22:01,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-22T15:22:01,921 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=ed6f777bba2efed5f759348895e3133f, regionState=OPENING, regionLocation=77927f992d0b,36033,1732288915809 2024-11-22T15:22:01,926 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809}] 2024-11-22T15:22:02,080 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:02,092 INFO [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
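The assignment above ends with an OpenRegionProcedure dispatched to 77927f992d0b,36033,1732288915809, and the region server starts opening the table's single region. A client can observe the resulting placement through RegionLocator; a sketch.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class WhereIsMyRegion {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator = conn.getRegionLocator(TableName.valueOf("TestAcidGuarantees"))) {
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            // For this single-region table the log shows one entry hosted on 77927f992d0b,36033,...
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }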
2024-11-22T15:22:02,092 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} 2024-11-22T15:22:02,093 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees ed6f777bba2efed5f759348895e3133f 2024-11-22T15:22:02,093 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T15:22:02,093 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for ed6f777bba2efed5f759348895e3133f 2024-11-22T15:22:02,093 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for ed6f777bba2efed5f759348895e3133f 2024-11-22T15:22:02,113 INFO [StoreOpener-ed6f777bba2efed5f759348895e3133f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region ed6f777bba2efed5f759348895e3133f 2024-11-22T15:22:02,116 INFO [StoreOpener-ed6f777bba2efed5f759348895e3133f-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T15:22:02,117 INFO [StoreOpener-ed6f777bba2efed5f759348895e3133f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ed6f777bba2efed5f759348895e3133f columnFamilyName A 2024-11-22T15:22:02,117 DEBUG [StoreOpener-ed6f777bba2efed5f759348895e3133f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:02,118 INFO [StoreOpener-ed6f777bba2efed5f759348895e3133f-1 {}] regionserver.HStore(327): Store=ed6f777bba2efed5f759348895e3133f/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T15:22:02,119 INFO [StoreOpener-ed6f777bba2efed5f759348895e3133f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region ed6f777bba2efed5f759348895e3133f 2024-11-22T15:22:02,121 INFO [StoreOpener-ed6f777bba2efed5f759348895e3133f-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T15:22:02,121 INFO [StoreOpener-ed6f777bba2efed5f759348895e3133f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ed6f777bba2efed5f759348895e3133f columnFamilyName B 2024-11-22T15:22:02,122 DEBUG [StoreOpener-ed6f777bba2efed5f759348895e3133f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:02,122 INFO [StoreOpener-ed6f777bba2efed5f759348895e3133f-1 {}] regionserver.HStore(327): Store=ed6f777bba2efed5f759348895e3133f/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T15:22:02,123 INFO [StoreOpener-ed6f777bba2efed5f759348895e3133f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region ed6f777bba2efed5f759348895e3133f 2024-11-22T15:22:02,125 INFO [StoreOpener-ed6f777bba2efed5f759348895e3133f-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T15:22:02,125 INFO [StoreOpener-ed6f777bba2efed5f759348895e3133f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ed6f777bba2efed5f759348895e3133f columnFamilyName C 2024-11-22T15:22:02,125 DEBUG [StoreOpener-ed6f777bba2efed5f759348895e3133f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:02,126 INFO [StoreOpener-ed6f777bba2efed5f759348895e3133f-1 {}] regionserver.HStore(327): Store=ed6f777bba2efed5f759348895e3133f/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T15:22:02,127 INFO [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:02,128 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f 2024-11-22T15:22:02,129 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f 2024-11-22T15:22:02,133 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T15:22:02,136 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for ed6f777bba2efed5f759348895e3133f 2024-11-22T15:22:02,139 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T15:22:02,140 INFO [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened ed6f777bba2efed5f759348895e3133f; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68308059, jitterRate=0.01786939799785614}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T15:22:02,141 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:02,143 INFO [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., pid=11, masterSystemTime=1732288922080 2024-11-22T15:22:02,146 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:02,146 INFO [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
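With the region now open (CompactingMemStore backing A, B and C), the test's writer threads issue single Puts that touch all three families; HBase applies a Put to one row atomically across families, which is the property TestAcidGuarantees#testMixedAtomicity exercises. A sketch of such a write, with a made-up row key and value.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.*;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AtomicMultiFamilyPut {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          byte[] value = Bytes.toBytes("v1");
          Put put = new Put(Bytes.toBytes("test_row_0"));   // illustrative row key
          // One Put spanning families A, B and C: a reader sees all three cells or none of them.
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col"), value);
          put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col"), value);
          put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col"), value);
          table.put(put);
        }
      }
    }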
2024-11-22T15:22:02,147 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=ed6f777bba2efed5f759348895e3133f, regionState=OPEN, openSeqNum=2, regionLocation=77927f992d0b,36033,1732288915809 2024-11-22T15:22:02,154 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-11-22T15:22:02,156 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 in 224 msec 2024-11-22T15:22:02,162 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-11-22T15:22:02,162 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=ed6f777bba2efed5f759348895e3133f, ASSIGN in 388 msec 2024-11-22T15:22:02,164 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T15:22:02,164 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732288922164"}]},"ts":"1732288922164"} 2024-11-22T15:22:02,167 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-22T15:22:02,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-22T15:22:02,218 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T15:22:02,221 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 717 msec 2024-11-22T15:22:02,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-22T15:22:02,683 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-11-22T15:22:02,689 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6fcb5f29 to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7fdf5682 2024-11-22T15:22:02,733 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f6e36fe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:22:02,737 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:22:02,740 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56520, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:22:02,745 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=MasterService, sasl=false 2024-11-22T15:22:02,747 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57422, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-22T15:22:02,759 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1f2091cc to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@79d38d10 2024-11-22T15:22:02,772 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f343a4d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:22:02,775 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x09bd0964 to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6c63ae4e 2024-11-22T15:22:02,789 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1324ee83, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:22:02,791 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x18cb251d to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@736f1673 2024-11-22T15:22:02,800 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@478bae6b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:22:02,802 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x45b55c24 to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4ee2166f 2024-11-22T15:22:02,817 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@48068a5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:22:02,818 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0e52b42a to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3f34ff67 2024-11-22T15:22:02,831 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@38766d64, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:22:02,833 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x09ed28bb to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4b5cad1a 2024-11-22T15:22:02,847 DEBUG 
[Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@295cb1ac, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:22:02,849 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x12a1285d to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3c3b736e 2024-11-22T15:22:02,864 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70267494, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:22:02,867 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x353bc462 to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@767a8485 2024-11-22T15:22:02,875 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d2a8e08, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:22:02,877 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x47fe2fa7 to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6502d571 2024-11-22T15:22:02,889 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c915d17, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:22:02,895 DEBUG [hconnection-0x3d0fdd06-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:22:02,904 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56528, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:22:02,908 DEBUG [hconnection-0x5d4164ec-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:22:02,910 DEBUG [hconnection-0x2216ae23-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:22:02,913 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:22:02,915 DEBUG [hconnection-0x40eb5aeb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:22:02,916 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56542, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:22:02,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 2024-11-22T15:22:02,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-22T15:22:02,922 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:22:02,924 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:22:02,926 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:22:02,932 DEBUG [hconnection-0x131c45f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:22:02,939 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56552, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:22:02,947 DEBUG [hconnection-0x11fa2cd8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:22:02,949 DEBUG [hconnection-0x48d3ed27-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:22:02,952 DEBUG [hconnection-0xc96820-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:22:02,952 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56558, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:22:02,954 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56564, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:22:02,963 DEBUG [hconnection-0x454da7b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:22:02,965 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56572, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:22:02,966 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56582, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:22:02,976 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56590, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:22:02,986 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56594, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:22:03,012 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed6f777bba2efed5f759348895e3133f 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-22T15:22:03,023 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed6f777bba2efed5f759348895e3133f 2024-11-22T15:22:03,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=A 2024-11-22T15:22:03,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-22T15:22:03,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:03,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=B 2024-11-22T15:22:03,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:03,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=C 2024-11-22T15:22:03,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:03,088 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:03,090 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-22T15:22:03,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:03,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:03,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:03,109 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:22:03,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:03,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:03,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-22T15:22:03,257 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/811a098b918240498f17800d23a78de0 is 50, key is test_row_0/A:col10/1732288922997/Put/seqid=0 2024-11-22T15:22:03,287 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:03,288 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-22T15:22:03,295 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:03,295 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:03,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732288983281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:03,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288983286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:03,300 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:03,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288983294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:03,304 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:03,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288983294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:03,306 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:03,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732288983295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:03,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:03,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:03,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:03,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741839_1015 (size=14341) 2024-11-22T15:22:03,309 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:22:03,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:03,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:03,443 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:03,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288983438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:03,445 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:03,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732288983439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:03,449 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:03,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732288983440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:03,451 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:03,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288983439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:03,453 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:03,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288983444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:03,473 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:03,474 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-22T15:22:03,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:03,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:03,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:03,488 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:22:03,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:03,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:03,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-22T15:22:03,645 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:03,646 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-22T15:22:03,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:03,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:03,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:03,647 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:03,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:03,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:03,653 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:03,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288983649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:03,656 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:03,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732288983651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:03,660 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:03,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732288983655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:03,664 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:03,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288983657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:03,666 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:03,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288983662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:03,718 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/811a098b918240498f17800d23a78de0 2024-11-22T15:22:03,800 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:03,801 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-22T15:22:03,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:03,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:03,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:03,802 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:03,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:03,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:03,899 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/3fa8ab96d7a94f70a547b2b30b426dc7 is 50, key is test_row_0/B:col10/1732288922997/Put/seqid=0 2024-11-22T15:22:03,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741840_1016 (size=12001) 2024-11-22T15:22:03,944 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/3fa8ab96d7a94f70a547b2b30b426dc7 2024-11-22T15:22:03,956 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:03,957 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-22T15:22:03,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:03,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:03,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:03,958 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:03,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:03,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:03,969 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:03,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288983964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:03,975 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:03,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732288983969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:03,975 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:03,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732288983969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:03,977 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:03,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288983970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:03,978 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:03,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288983974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:04,010 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/9e4031ecd26b41a0a4539aae86f0e5ad is 50, key is test_row_0/C:col10/1732288922997/Put/seqid=0 2024-11-22T15:22:04,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-22T15:22:04,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741841_1017 (size=12001) 2024-11-22T15:22:04,075 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/9e4031ecd26b41a0a4539aae86f0e5ad 2024-11-22T15:22:04,092 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/811a098b918240498f17800d23a78de0 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/811a098b918240498f17800d23a78de0 2024-11-22T15:22:04,111 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/811a098b918240498f17800d23a78de0, entries=200, sequenceid=12, filesize=14.0 K 2024-11-22T15:22:04,115 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:04,116 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-22T15:22:04,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
2024-11-22T15:22:04,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:04,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:04,117 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:04,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:22:04,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:22:04,122 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/3fa8ab96d7a94f70a547b2b30b426dc7 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/3fa8ab96d7a94f70a547b2b30b426dc7 2024-11-22T15:22:04,140 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/3fa8ab96d7a94f70a547b2b30b426dc7, entries=150, sequenceid=12, filesize=11.7 K 2024-11-22T15:22:04,142 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/9e4031ecd26b41a0a4539aae86f0e5ad as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/9e4031ecd26b41a0a4539aae86f0e5ad 2024-11-22T15:22:04,159 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/9e4031ecd26b41a0a4539aae86f0e5ad, entries=150, sequenceid=12, filesize=11.7 K 2024-11-22T15:22:04,161 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=161.02 KB/164880 for ed6f777bba2efed5f759348895e3133f in 1150ms, sequenceid=12, compaction requested=false 2024-11-22T15:22:04,163 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-22T15:22:04,164 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:04,270 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:04,273 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-22T15:22:04,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
2024-11-22T15:22:04,274 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing ed6f777bba2efed5f759348895e3133f 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-22T15:22:04,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=A 2024-11-22T15:22:04,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:04,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=B 2024-11-22T15:22:04,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:04,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=C 2024-11-22T15:22:04,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:04,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/076afb52330547c49cd0cb45be340f06 is 50, key is test_row_0/A:col10/1732288923287/Put/seqid=0 2024-11-22T15:22:04,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741842_1018 (size=12001) 2024-11-22T15:22:04,348 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/076afb52330547c49cd0cb45be340f06 2024-11-22T15:22:04,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/2a456b2630024494b6d05ab7c6cafdc6 is 50, key is test_row_0/B:col10/1732288923287/Put/seqid=0 2024-11-22T15:22:04,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741843_1019 (size=12001) 2024-11-22T15:22:04,413 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=39 (bloomFilter=true), 
to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/2a456b2630024494b6d05ab7c6cafdc6 2024-11-22T15:22:04,419 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-22T15:22:04,420 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-11-22T15:22:04,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/77acae05d7324f25b6bfaf7843aba5a1 is 50, key is test_row_0/C:col10/1732288923287/Put/seqid=0 2024-11-22T15:22:04,493 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:04,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed6f777bba2efed5f759348895e3133f 2024-11-22T15:22:04,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741844_1020 (size=12001) 2024-11-22T15:22:04,513 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/77acae05d7324f25b6bfaf7843aba5a1 2024-11-22T15:22:04,527 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:04,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288984518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:04,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/076afb52330547c49cd0cb45be340f06 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/076afb52330547c49cd0cb45be340f06 2024-11-22T15:22:04,545 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/076afb52330547c49cd0cb45be340f06, entries=150, sequenceid=39, filesize=11.7 K 2024-11-22T15:22:04,546 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:04,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288984525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:04,548 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:04,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732288984528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:04,549 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:04,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288984528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:04,550 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:04,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732288984529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:04,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/2a456b2630024494b6d05ab7c6cafdc6 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/2a456b2630024494b6d05ab7c6cafdc6 2024-11-22T15:22:04,570 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/2a456b2630024494b6d05ab7c6cafdc6, entries=150, sequenceid=39, filesize=11.7 K 2024-11-22T15:22:04,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/77acae05d7324f25b6bfaf7843aba5a1 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/77acae05d7324f25b6bfaf7843aba5a1 2024-11-22T15:22:04,600 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/77acae05d7324f25b6bfaf7843aba5a1, entries=150, sequenceid=39, filesize=11.7 K 2024-11-22T15:22:04,602 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for ed6f777bba2efed5f759348895e3133f in 328ms, sequenceid=39, compaction requested=false 2024-11-22T15:22:04,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:04,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing 
region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:04,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-11-22T15:22:04,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-11-22T15:22:04,614 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-11-22T15:22:04,615 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6850 sec 2024-11-22T15:22:04,623 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 1.7010 sec 2024-11-22T15:22:04,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed6f777bba2efed5f759348895e3133f 2024-11-22T15:22:04,654 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed6f777bba2efed5f759348895e3133f 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-22T15:22:04,654 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=A 2024-11-22T15:22:04,654 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:04,654 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=B 2024-11-22T15:22:04,654 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:04,654 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=C 2024-11-22T15:22:04,655 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:04,706 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/bb1e942609ce41c2be9090fc1a272a00 is 50, key is test_row_0/A:col10/1732288924517/Put/seqid=0 2024-11-22T15:22:04,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741845_1021 (size=16681) 2024-11-22T15:22:04,834 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:04,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288984805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:04,836 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:04,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288984809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:04,837 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:04,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732288984815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:04,843 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:04,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732288984825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:04,843 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:04,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288984818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:04,948 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:04,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288984939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:04,950 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:04,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288984943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:04,951 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:04,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732288984944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:04,952 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:04,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732288984947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:04,952 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:04,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288984949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:05,022 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-22T15:22:05,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-22T15:22:05,034 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed 2024-11-22T15:22:05,037 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:22:05,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 2024-11-22T15:22:05,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-22T15:22:05,041 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:22:05,055 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:22:05,055 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; 
org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:22:05,134 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-22T15:22:05,135 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-22T15:22:05,137 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-11-22T15:22:05,137 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-11-22T15:22:05,139 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T15:22:05,139 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-22T15:22:05,140 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-22T15:22:05,140 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-22T15:22:05,141 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-22T15:22:05,141 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-22T15:22:05,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-22T15:22:05,154 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:05,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288985153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:05,156 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/bb1e942609ce41c2be9090fc1a272a00 2024-11-22T15:22:05,156 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:05,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288985153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:05,157 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:05,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732288985155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:05,158 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:05,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732288985155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:05,177 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/4de3cda9762a41ed9ead25798d074956 is 50, key is test_row_0/B:col10/1732288924517/Put/seqid=0 2024-11-22T15:22:05,178 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:05,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288985171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:05,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741846_1022 (size=12001) 2024-11-22T15:22:05,193 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/4de3cda9762a41ed9ead25798d074956 2024-11-22T15:22:05,216 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:05,217 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-22T15:22:05,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:05,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:05,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:05,217 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:05,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:05,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:05,223 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/18d10c63c73944968e172c818f755f9a is 50, key is test_row_0/C:col10/1732288924517/Put/seqid=0 2024-11-22T15:22:05,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741847_1023 (size=12001) 2024-11-22T15:22:05,252 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/18d10c63c73944968e172c818f755f9a 2024-11-22T15:22:05,274 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/bb1e942609ce41c2be9090fc1a272a00 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/bb1e942609ce41c2be9090fc1a272a00 2024-11-22T15:22:05,294 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/bb1e942609ce41c2be9090fc1a272a00, entries=250, sequenceid=50, filesize=16.3 K 2024-11-22T15:22:05,297 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/4de3cda9762a41ed9ead25798d074956 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/4de3cda9762a41ed9ead25798d074956 2024-11-22T15:22:05,314 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/4de3cda9762a41ed9ead25798d074956, entries=150, sequenceid=50, filesize=11.7 K 2024-11-22T15:22:05,316 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/18d10c63c73944968e172c818f755f9a as 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/18d10c63c73944968e172c818f755f9a 2024-11-22T15:22:05,329 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/18d10c63c73944968e172c818f755f9a, entries=150, sequenceid=50, filesize=11.7 K 2024-11-22T15:22:05,331 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for ed6f777bba2efed5f759348895e3133f in 677ms, sequenceid=50, compaction requested=true 2024-11-22T15:22:05,331 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:05,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed6f777bba2efed5f759348895e3133f:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:22:05,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:05,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed6f777bba2efed5f759348895e3133f:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:22:05,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:22:05,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed6f777bba2efed5f759348895e3133f:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:22:05,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-22T15:22:05,341 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:05,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-22T15:22:05,344 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:05,347 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 43023 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:05,347 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:05,349 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): ed6f777bba2efed5f759348895e3133f/A is initiating minor compaction (all files) 2024-11-22T15:22:05,349 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] 
regionserver.HRegion(2351): Starting compaction of ed6f777bba2efed5f759348895e3133f/A in TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:05,349 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/811a098b918240498f17800d23a78de0, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/076afb52330547c49cd0cb45be340f06, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/bb1e942609ce41c2be9090fc1a272a00] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp, totalSize=42.0 K 2024-11-22T15:22:05,350 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): ed6f777bba2efed5f759348895e3133f/B is initiating minor compaction (all files) 2024-11-22T15:22:05,350 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed6f777bba2efed5f759348895e3133f/B in TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:05,351 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/3fa8ab96d7a94f70a547b2b30b426dc7, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/2a456b2630024494b6d05ab7c6cafdc6, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/4de3cda9762a41ed9ead25798d074956] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp, totalSize=35.2 K 2024-11-22T15:22:05,352 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 811a098b918240498f17800d23a78de0, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732288922965 2024-11-22T15:22:05,353 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 3fa8ab96d7a94f70a547b2b30b426dc7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732288922965 2024-11-22T15:22:05,354 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 076afb52330547c49cd0cb45be340f06, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732288923284 2024-11-22T15:22:05,354 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 2a456b2630024494b6d05ab7c6cafdc6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732288923284 2024-11-22T15:22:05,355 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting bb1e942609ce41c2be9090fc1a272a00, keycount=250, bloomtype=ROW, size=16.3 K, 
encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732288924508 2024-11-22T15:22:05,355 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 4de3cda9762a41ed9ead25798d074956, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732288924517 2024-11-22T15:22:05,370 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:05,371 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-22T15:22:05,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:05,372 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing ed6f777bba2efed5f759348895e3133f 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-22T15:22:05,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=A 2024-11-22T15:22:05,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:05,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=B 2024-11-22T15:22:05,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:05,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=C 2024-11-22T15:22:05,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:05,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/a98afbfdf1a248f1859f8b8dd0dc35e8 is 50, key is test_row_0/A:col10/1732288924804/Put/seqid=0 2024-11-22T15:22:05,426 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed6f777bba2efed5f759348895e3133f#B#compaction#10 average throughput is 0.36 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:05,426 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed6f777bba2efed5f759348895e3133f#A#compaction#11 average throughput is 0.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:05,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741848_1024 (size=12001) 2024-11-22T15:22:05,433 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/d48ab69f07d643389ddaa337fa3ffd6e is 50, key is test_row_0/B:col10/1732288924517/Put/seqid=0 2024-11-22T15:22:05,447 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/a98afbfdf1a248f1859f8b8dd0dc35e8 2024-11-22T15:22:05,460 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/18fb3ebed6cd41448f2376e3ba95470f is 50, key is test_row_0/A:col10/1732288924517/Put/seqid=0 2024-11-22T15:22:05,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed6f777bba2efed5f759348895e3133f 2024-11-22T15:22:05,471 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:05,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741849_1025 (size=12104) 2024-11-22T15:22:05,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/125485b1c5d6464b8b84e84b77acfd08 is 50, key is test_row_0/B:col10/1732288924804/Put/seqid=0 2024-11-22T15:22:05,515 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:05,515 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:05,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732288985501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:05,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288985501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:05,527 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:05,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288985516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:05,532 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:05,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288985518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:05,534 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:05,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732288985519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:05,534 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/d48ab69f07d643389ddaa337fa3ffd6e as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/d48ab69f07d643389ddaa337fa3ffd6e 2024-11-22T15:22:05,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741850_1026 (size=12104) 2024-11-22T15:22:05,564 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed6f777bba2efed5f759348895e3133f/B of ed6f777bba2efed5f759348895e3133f into d48ab69f07d643389ddaa337fa3ffd6e(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:22:05,565 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:05,565 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., storeName=ed6f777bba2efed5f759348895e3133f/B, priority=13, startTime=1732288925340; duration=0sec 2024-11-22T15:22:05,565 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:22:05,565 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed6f777bba2efed5f759348895e3133f:B 2024-11-22T15:22:05,566 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:05,571 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:05,571 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): ed6f777bba2efed5f759348895e3133f/C is initiating minor compaction (all files) 2024-11-22T15:22:05,571 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed6f777bba2efed5f759348895e3133f/C in TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:05,574 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/9e4031ecd26b41a0a4539aae86f0e5ad, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/77acae05d7324f25b6bfaf7843aba5a1, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/18d10c63c73944968e172c818f755f9a] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp, totalSize=35.2 K 2024-11-22T15:22:05,576 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 9e4031ecd26b41a0a4539aae86f0e5ad, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732288922965 2024-11-22T15:22:05,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741851_1027 (size=12001) 2024-11-22T15:22:05,578 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 77acae05d7324f25b6bfaf7843aba5a1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732288923284 2024-11-22T15:22:05,579 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=75 
(bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/125485b1c5d6464b8b84e84b77acfd08 2024-11-22T15:22:05,584 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 18d10c63c73944968e172c818f755f9a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732288924517 2024-11-22T15:22:05,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/42d351a76a82451583e10d18f9885071 is 50, key is test_row_0/C:col10/1732288924804/Put/seqid=0 2024-11-22T15:22:05,618 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed6f777bba2efed5f759348895e3133f#C#compaction#14 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:05,620 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:05,619 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/70271f10a8d043bfb562d624cc04e203 is 50, key is test_row_0/C:col10/1732288924517/Put/seqid=0 2024-11-22T15:22:05,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732288985619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:05,626 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:05,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288985625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:05,635 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:05,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288985634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:05,640 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:05,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732288985638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:05,643 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:05,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288985639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:05,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-22T15:22:05,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741852_1028 (size=12001) 2024-11-22T15:22:05,653 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/42d351a76a82451583e10d18f9885071 2024-11-22T15:22:05,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741853_1029 (size=12104) 2024-11-22T15:22:05,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/a98afbfdf1a248f1859f8b8dd0dc35e8 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/a98afbfdf1a248f1859f8b8dd0dc35e8 2024-11-22T15:22:05,691 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/a98afbfdf1a248f1859f8b8dd0dc35e8, entries=150, sequenceid=75, filesize=11.7 K 2024-11-22T15:22:05,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/125485b1c5d6464b8b84e84b77acfd08 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/125485b1c5d6464b8b84e84b77acfd08 2024-11-22T15:22:05,707 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): 
Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/125485b1c5d6464b8b84e84b77acfd08, entries=150, sequenceid=75, filesize=11.7 K 2024-11-22T15:22:05,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/42d351a76a82451583e10d18f9885071 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/42d351a76a82451583e10d18f9885071 2024-11-22T15:22:05,726 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/42d351a76a82451583e10d18f9885071, entries=150, sequenceid=75, filesize=11.7 K 2024-11-22T15:22:05,728 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for ed6f777bba2efed5f759348895e3133f in 355ms, sequenceid=75, compaction requested=false 2024-11-22T15:22:05,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:05,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
2024-11-22T15:22:05,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-11-22T15:22:05,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-11-22T15:22:05,736 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-11-22T15:22:05,736 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 677 msec 2024-11-22T15:22:05,740 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 701 msec 2024-11-22T15:22:05,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed6f777bba2efed5f759348895e3133f 2024-11-22T15:22:05,834 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed6f777bba2efed5f759348895e3133f 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-22T15:22:05,834 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=A 2024-11-22T15:22:05,834 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:05,835 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=B 2024-11-22T15:22:05,835 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:05,835 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=C 2024-11-22T15:22:05,835 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:05,853 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/ac3b33115bed4141b4334aa086e848ab is 50, key is test_row_0/A:col10/1732288925500/Put/seqid=0 2024-11-22T15:22:05,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741854_1030 (size=11997) 2024-11-22T15:22:05,865 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/ac3b33115bed4141b4334aa086e848ab 2024-11-22T15:22:05,886 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/270e3550bbce42cea912151f3366e87e is 50, key is test_row_0/B:col10/1732288925500/Put/seqid=0 2024-11-22T15:22:05,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741855_1031 
(size=9657) 2024-11-22T15:22:05,919 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/270e3550bbce42cea912151f3366e87e 2024-11-22T15:22:05,920 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:05,920 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:05,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288985900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:05,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732288985900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:05,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:05,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732288985902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:05,923 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:05,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288985910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:05,925 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:05,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288985920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:05,940 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/7981649e97ab46f592058d57a07b7fef is 50, key is test_row_0/C:col10/1732288925500/Put/seqid=0 2024-11-22T15:22:05,976 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/18fb3ebed6cd41448f2376e3ba95470f as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/18fb3ebed6cd41448f2376e3ba95470f 2024-11-22T15:22:05,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741856_1032 (size=9657) 2024-11-22T15:22:05,993 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed6f777bba2efed5f759348895e3133f/A of ed6f777bba2efed5f759348895e3133f into 18fb3ebed6cd41448f2376e3ba95470f(size=11.8 K), total size for store is 23.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:22:05,993 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:05,993 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., storeName=ed6f777bba2efed5f759348895e3133f/A, priority=13, startTime=1732288925332; duration=0sec 2024-11-22T15:22:05,993 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:05,993 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed6f777bba2efed5f759348895e3133f:A 2024-11-22T15:22:06,030 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:06,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732288986025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:06,031 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:06,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732288986026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:06,032 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:06,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288986027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:06,033 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:06,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288986027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:06,034 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:06,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288986028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:06,080 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/70271f10a8d043bfb562d624cc04e203 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/70271f10a8d043bfb562d624cc04e203 2024-11-22T15:22:06,096 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed6f777bba2efed5f759348895e3133f/C of ed6f777bba2efed5f759348895e3133f into 70271f10a8d043bfb562d624cc04e203(size=11.8 K), total size for store is 23.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:22:06,097 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:06,097 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., storeName=ed6f777bba2efed5f759348895e3133f/C, priority=13, startTime=1732288925340; duration=0sec 2024-11-22T15:22:06,097 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:06,097 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed6f777bba2efed5f759348895e3133f:C 2024-11-22T15:22:06,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-22T15:22:06,146 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-11-22T15:22:06,150 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:22:06,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-11-22T15:22:06,154 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:22:06,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-22T15:22:06,155 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:22:06,156 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:22:06,235 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:06,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732288986234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:06,236 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:06,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732288986234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:06,237 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:06,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288986236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:06,238 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:06,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288986236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:06,239 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:06,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288986237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:06,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-22T15:22:06,311 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:06,311 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-22T15:22:06,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:06,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:06,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:06,312 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:06,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:06,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:06,389 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/7981649e97ab46f592058d57a07b7fef 2024-11-22T15:22:06,408 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/ac3b33115bed4141b4334aa086e848ab as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/ac3b33115bed4141b4334aa086e848ab 2024-11-22T15:22:06,424 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/ac3b33115bed4141b4334aa086e848ab, entries=150, sequenceid=89, filesize=11.7 K 2024-11-22T15:22:06,426 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/270e3550bbce42cea912151f3366e87e as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/270e3550bbce42cea912151f3366e87e 2024-11-22T15:22:06,441 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/270e3550bbce42cea912151f3366e87e, entries=100, sequenceid=89, 
filesize=9.4 K 2024-11-22T15:22:06,444 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/7981649e97ab46f592058d57a07b7fef as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/7981649e97ab46f592058d57a07b7fef 2024-11-22T15:22:06,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-22T15:22:06,461 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/7981649e97ab46f592058d57a07b7fef, entries=100, sequenceid=89, filesize=9.4 K 2024-11-22T15:22:06,464 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for ed6f777bba2efed5f759348895e3133f in 629ms, sequenceid=89, compaction requested=true 2024-11-22T15:22:06,464 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:06,464 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:06,465 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:06,466 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed6f777bba2efed5f759348895e3133f:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:22:06,466 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:06,466 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:06,466 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-22T15:22:06,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
2024-11-22T15:22:06,467 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing ed6f777bba2efed5f759348895e3133f 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-22T15:22:06,467 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed6f777bba2efed5f759348895e3133f:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:22:06,467 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:06,467 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed6f777bba2efed5f759348895e3133f:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:22:06,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=A 2024-11-22T15:22:06,467 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:22:06,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:06,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=B 2024-11-22T15:22:06,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:06,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=C 2024-11-22T15:22:06,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:06,470 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36102 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:06,470 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): ed6f777bba2efed5f759348895e3133f/A is initiating minor compaction (all files) 2024-11-22T15:22:06,471 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed6f777bba2efed5f759348895e3133f/A in TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
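The RegionTooBusyException entries above record writes being rejected once the region's memstore passes its blocking limit ("Over memstore limit=512.0 K" in this run). The exact settings used by this test are not visible in the excerpt; the following is only a minimal sketch, using the standard Hadoop/HBase Configuration API, of how a flush size and blocking multiplier that yield such a limit are typically set. The 128 KB flush size is an illustrative, hypothetical value, not taken from this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreLimitSketch {
    public static Configuration buildConf() {
        // Start from the default HBase configuration.
        Configuration conf = HBaseConfiguration.create();

        // Size at which a region's memstore is flushed to disk.
        // 128 KB is a hypothetical test-sized value for illustration only.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);

        // Writes are blocked with RegionTooBusyException once the memstore
        // reaches flush.size * block.multiplier; 128 KB * 4 = 512 KB, which
        // would match the "Over memstore limit=512.0 K" messages above.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        return conf;
    }
}
```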
2024-11-22T15:22:06,471 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/18fb3ebed6cd41448f2376e3ba95470f, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/a98afbfdf1a248f1859f8b8dd0dc35e8, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/ac3b33115bed4141b4334aa086e848ab] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp, totalSize=35.3 K 2024-11-22T15:22:06,472 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 18fb3ebed6cd41448f2376e3ba95470f, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732288924517 2024-11-22T15:22:06,473 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33762 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:06,473 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): ed6f777bba2efed5f759348895e3133f/B is initiating minor compaction (all files) 2024-11-22T15:22:06,473 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed6f777bba2efed5f759348895e3133f/B in TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
2024-11-22T15:22:06,473 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/d48ab69f07d643389ddaa337fa3ffd6e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/125485b1c5d6464b8b84e84b77acfd08, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/270e3550bbce42cea912151f3366e87e] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp, totalSize=33.0 K 2024-11-22T15:22:06,474 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting a98afbfdf1a248f1859f8b8dd0dc35e8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732288924804 2024-11-22T15:22:06,475 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting d48ab69f07d643389ddaa337fa3ffd6e, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732288924517 2024-11-22T15:22:06,475 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting ac3b33115bed4141b4334aa086e848ab, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732288925500 2024-11-22T15:22:06,476 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 125485b1c5d6464b8b84e84b77acfd08, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732288924804 2024-11-22T15:22:06,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/78242f17e7ba403aa1413ea437d65090 is 50, key is test_row_0/A:col10/1732288925899/Put/seqid=0 2024-11-22T15:22:06,479 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 270e3550bbce42cea912151f3366e87e, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732288925500 2024-11-22T15:22:06,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741857_1033 (size=12001) 2024-11-22T15:22:06,501 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/78242f17e7ba403aa1413ea437d65090 2024-11-22T15:22:06,514 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed6f777bba2efed5f759348895e3133f#B#compaction#19 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:06,515 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/5b33c56cc1e04168a4295558e549eee4 is 50, key is test_row_0/B:col10/1732288925500/Put/seqid=0 2024-11-22T15:22:06,516 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed6f777bba2efed5f759348895e3133f#A#compaction#20 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:06,517 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/1bc09a5bb0d344fda531ef7c79aaf625 is 50, key is test_row_0/A:col10/1732288925500/Put/seqid=0 2024-11-22T15:22:06,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/8d7463c00b534b2a8090cdf358c82202 is 50, key is test_row_0/B:col10/1732288925899/Put/seqid=0 2024-11-22T15:22:06,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed6f777bba2efed5f759348895e3133f 2024-11-22T15:22:06,544 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:06,561 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:06,562 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:06,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288986556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:06,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732288986555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:06,568 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:06,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288986560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:06,568 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:06,569 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:06,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732288986561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:06,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288986562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:06,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741859_1035 (size=12207) 2024-11-22T15:22:06,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741858_1034 (size=12207) 2024-11-22T15:22:06,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741860_1036 (size=12001) 2024-11-22T15:22:06,590 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/5b33c56cc1e04168a4295558e549eee4 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/5b33c56cc1e04168a4295558e549eee4 2024-11-22T15:22:06,590 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/1bc09a5bb0d344fda531ef7c79aaf625 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/1bc09a5bb0d344fda531ef7c79aaf625 2024-11-22T15:22:06,591 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=114 (bloomFilter=true), 
to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/8d7463c00b534b2a8090cdf358c82202 2024-11-22T15:22:06,606 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed6f777bba2efed5f759348895e3133f/B of ed6f777bba2efed5f759348895e3133f into 5b33c56cc1e04168a4295558e549eee4(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:22:06,607 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:06,607 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., storeName=ed6f777bba2efed5f759348895e3133f/B, priority=13, startTime=1732288926466; duration=0sec 2024-11-22T15:22:06,607 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:22:06,607 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed6f777bba2efed5f759348895e3133f:B 2024-11-22T15:22:06,607 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:06,610 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed6f777bba2efed5f759348895e3133f/A of ed6f777bba2efed5f759348895e3133f into 1bc09a5bb0d344fda531ef7c79aaf625(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:22:06,610 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:06,610 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., storeName=ed6f777bba2efed5f759348895e3133f/A, priority=13, startTime=1732288926464; duration=0sec 2024-11-22T15:22:06,610 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:06,611 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed6f777bba2efed5f759348895e3133f:A 2024-11-22T15:22:06,611 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33762 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:06,611 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): ed6f777bba2efed5f759348895e3133f/C is initiating minor compaction (all files) 2024-11-22T15:22:06,611 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed6f777bba2efed5f759348895e3133f/C in TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:06,612 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/70271f10a8d043bfb562d624cc04e203, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/42d351a76a82451583e10d18f9885071, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/7981649e97ab46f592058d57a07b7fef] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp, totalSize=33.0 K 2024-11-22T15:22:06,613 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 70271f10a8d043bfb562d624cc04e203, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732288924517 2024-11-22T15:22:06,613 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 42d351a76a82451583e10d18f9885071, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732288924804 2024-11-22T15:22:06,614 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 7981649e97ab46f592058d57a07b7fef, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732288925500 2024-11-22T15:22:06,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/0a9597a5f64e4794b41cae09cef188f5 is 50, key is test_row_0/C:col10/1732288925899/Put/seqid=0 2024-11-22T15:22:06,641 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed6f777bba2efed5f759348895e3133f#C#compaction#23 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:06,642 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/17ccf7de0d2c4ef293827c498d833d8b is 50, key is test_row_0/C:col10/1732288925500/Put/seqid=0 2024-11-22T15:22:06,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741861_1037 (size=12001) 2024-11-22T15:22:06,662 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/0a9597a5f64e4794b41cae09cef188f5 2024-11-22T15:22:06,667 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:06,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732288986664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:06,668 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:06,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288986664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:06,672 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:06,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732288986672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:06,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/78242f17e7ba403aa1413ea437d65090 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/78242f17e7ba403aa1413ea437d65090 2024-11-22T15:22:06,682 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:06,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288986675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:06,682 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:06,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288986680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:06,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741862_1038 (size=12207) 2024-11-22T15:22:06,690 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/78242f17e7ba403aa1413ea437d65090, entries=150, sequenceid=114, filesize=11.7 K 2024-11-22T15:22:06,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/8d7463c00b534b2a8090cdf358c82202 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/8d7463c00b534b2a8090cdf358c82202 2024-11-22T15:22:06,704 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/17ccf7de0d2c4ef293827c498d833d8b as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/17ccf7de0d2c4ef293827c498d833d8b 2024-11-22T15:22:06,704 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/8d7463c00b534b2a8090cdf358c82202, entries=150, sequenceid=114, filesize=11.7 K 2024-11-22T15:22:06,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/0a9597a5f64e4794b41cae09cef188f5 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/0a9597a5f64e4794b41cae09cef188f5 2024-11-22T15:22:06,720 INFO 
[RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed6f777bba2efed5f759348895e3133f/C of ed6f777bba2efed5f759348895e3133f into 17ccf7de0d2c4ef293827c498d833d8b(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:22:06,720 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:06,721 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., storeName=ed6f777bba2efed5f759348895e3133f/C, priority=13, startTime=1732288926467; duration=0sec 2024-11-22T15:22:06,721 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:06,721 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed6f777bba2efed5f759348895e3133f:C 2024-11-22T15:22:06,722 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/0a9597a5f64e4794b41cae09cef188f5, entries=150, sequenceid=114, filesize=11.7 K 2024-11-22T15:22:06,723 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for ed6f777bba2efed5f759348895e3133f in 257ms, sequenceid=114, compaction requested=false 2024-11-22T15:22:06,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:06,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
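The entries that follow show pid=17 completing, the client's flush of TestAcidGuarantees (procId 16) being reported as done, and a new FlushTableProcedure (pid=18) being created for another client-requested flush. A minimal sketch of issuing such a flush through the standard HBase Admin API is shown below; the connection setup is illustrative and not taken from the test itself.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Triggers a FlushTableProcedure on the master, which dispatches
            // FlushRegionProcedure/FlushRegionCallable work to each region
            // server, as seen in the surrounding log entries.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```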
2024-11-22T15:22:06,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-11-22T15:22:06,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-11-22T15:22:06,728 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-11-22T15:22:06,728 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 570 msec 2024-11-22T15:22:06,732 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 580 msec 2024-11-22T15:22:06,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-22T15:22:06,761 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-11-22T15:22:06,763 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:22:06,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-11-22T15:22:06,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-22T15:22:06,766 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:22:06,767 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:22:06,768 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:22:06,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-22T15:22:06,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed6f777bba2efed5f759348895e3133f 2024-11-22T15:22:06,875 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed6f777bba2efed5f759348895e3133f 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-22T15:22:06,876 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=A 2024-11-22T15:22:06,876 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:06,876 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, 
store=B 2024-11-22T15:22:06,876 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:06,876 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=C 2024-11-22T15:22:06,876 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:06,885 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/0e647622ef114b53b2dfff88600bfd63 is 50, key is test_row_0/A:col10/1732288926873/Put/seqid=0 2024-11-22T15:22:06,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741863_1039 (size=16781) 2024-11-22T15:22:06,900 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/0e647622ef114b53b2dfff88600bfd63 2024-11-22T15:22:06,920 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/116957e902b747ccabcbaf9230ca3004 is 50, key is test_row_0/B:col10/1732288926873/Put/seqid=0 2024-11-22T15:22:06,922 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:06,923 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-22T15:22:06,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:06,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:06,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:06,924 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:06,924 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:06,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
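Around this point the Mutate RPCs are being rejected with RegionTooBusyException ("Over memstore limit=512.0 K") while the region is busy flushing. The stock HBase client normally retries these rejections internally; the following is only a minimal, assumed sketch (not part of the test) that makes the back-off-and-retry pattern explicit for a single put:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public class PutWithBackoff {
    static void putWithBackoff(Table table, Put put) throws IOException, InterruptedException {
        long delayMs = 100;                              // assumed initial backoff
        for (int attempt = 0; attempt < 5; attempt++) {  // assumed retry limit
            try {
                table.put(put);                          // may be rejected while the memstore is over its limit
                return;
            } catch (RegionTooBusyException e) {
                Thread.sleep(delayMs);                   // give flushes/compactions time to drain the memstore
                delayMs *= 2;                            // exponential backoff before retrying
            }
        }
        throw new IOException("gave up after repeated RegionTooBusyException");
    }
}
```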
2024-11-22T15:22:06,924 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:06,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288986916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:06,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288986915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:06,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:06,928 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:06,928 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:06,928 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:06,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288986920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:06,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732288986922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:06,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732288986924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:06,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741864_1040 (size=12101) 2024-11-22T15:22:06,943 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/116957e902b747ccabcbaf9230ca3004 2024-11-22T15:22:06,960 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/31489670f329482e9900072930dda420 is 50, key is test_row_0/C:col10/1732288926873/Put/seqid=0 2024-11-22T15:22:06,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741865_1041 (size=12101) 2024-11-22T15:22:06,978 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/31489670f329482e9900072930dda420 2024-11-22T15:22:06,991 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/0e647622ef114b53b2dfff88600bfd63 as 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/0e647622ef114b53b2dfff88600bfd63 2024-11-22T15:22:07,008 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/0e647622ef114b53b2dfff88600bfd63, entries=250, sequenceid=133, filesize=16.4 K 2024-11-22T15:22:07,011 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/116957e902b747ccabcbaf9230ca3004 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/116957e902b747ccabcbaf9230ca3004 2024-11-22T15:22:07,021 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/116957e902b747ccabcbaf9230ca3004, entries=150, sequenceid=133, filesize=11.8 K 2024-11-22T15:22:07,022 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/31489670f329482e9900072930dda420 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/31489670f329482e9900072930dda420 2024-11-22T15:22:07,029 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:07,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288987028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:07,031 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:07,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288987028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:07,035 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:07,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288987030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:07,036 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/31489670f329482e9900072930dda420, entries=150, sequenceid=133, filesize=11.8 K 2024-11-22T15:22:07,038 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for ed6f777bba2efed5f759348895e3133f in 162ms, sequenceid=133, compaction requested=true 2024-11-22T15:22:07,038 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:07,038 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed6f777bba2efed5f759348895e3133f:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:22:07,038 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:07,038 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:07,038 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed6f777bba2efed5f759348895e3133f:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:22:07,038 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:07,038 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:07,038 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed6f777bba2efed5f759348895e3133f:C, priority=-2147483648, current under compaction 
store size is 3 2024-11-22T15:22:07,038 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:22:07,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed6f777bba2efed5f759348895e3133f 2024-11-22T15:22:07,039 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed6f777bba2efed5f759348895e3133f 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-22T15:22:07,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=A 2024-11-22T15:22:07,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:07,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=B 2024-11-22T15:22:07,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:07,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=C 2024-11-22T15:22:07,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:07,043 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40989 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:07,043 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): ed6f777bba2efed5f759348895e3133f/A is initiating minor compaction (all files) 2024-11-22T15:22:07,043 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed6f777bba2efed5f759348895e3133f/A in TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
2024-11-22T15:22:07,044 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/1bc09a5bb0d344fda531ef7c79aaf625, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/78242f17e7ba403aa1413ea437d65090, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/0e647622ef114b53b2dfff88600bfd63] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp, totalSize=40.0 K 2024-11-22T15:22:07,045 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:07,045 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): ed6f777bba2efed5f759348895e3133f/B is initiating minor compaction (all files) 2024-11-22T15:22:07,045 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed6f777bba2efed5f759348895e3133f/B in TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:07,045 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/5b33c56cc1e04168a4295558e549eee4, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/8d7463c00b534b2a8090cdf358c82202, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/116957e902b747ccabcbaf9230ca3004] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp, totalSize=35.5 K 2024-11-22T15:22:07,046 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1bc09a5bb0d344fda531ef7c79aaf625, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732288924814 2024-11-22T15:22:07,046 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 5b33c56cc1e04168a4295558e549eee4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732288924814 2024-11-22T15:22:07,047 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 78242f17e7ba403aa1413ea437d65090, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1732288925899 2024-11-22T15:22:07,047 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 8d7463c00b534b2a8090cdf358c82202, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1732288925899 2024-11-22T15:22:07,049 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 116957e902b747ccabcbaf9230ca3004, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732288926558 2024-11-22T15:22:07,049 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0e647622ef114b53b2dfff88600bfd63, keycount=250, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732288926557 2024-11-22T15:22:07,065 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/a057db3163ea451e96e6a3911c58f025 is 50, key is test_row_0/A:col10/1732288927036/Put/seqid=0 2024-11-22T15:22:07,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-22T15:22:07,077 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:07,079 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-22T15:22:07,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:07,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:07,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:07,080 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
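The compaction selections above (each store picked once it had 3 eligible files) and the "Over memstore limit=512.0 K" rejections are both driven by a handful of region-server settings. A small sketch, under assumptions: the config keys are the standard HBase ones, the fallback values are the usual defaults, and the 512 K blocking limit seen here presumably comes from the test configuring a much smaller flush size than the default:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RegionTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // A store becomes eligible for minor compaction once it has at least this many files.
        int compactionThreshold = conf.getInt("hbase.hstore.compactionThreshold", 3);
        // Writes are rejected with RegionTooBusyException once a region's memstore exceeds
        // flush size * block multiplier.
        long flushSize  = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("compact at >= " + compactionThreshold + " store files");
        System.out.println("block writes above ~" + (flushSize * multiplier) + " bytes per region");
    }
}
```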
2024-11-22T15:22:07,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:07,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:07,090 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:07,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732288987082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:07,091 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:07,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732288987088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:07,098 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed6f777bba2efed5f759348895e3133f#A#compaction#28 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:07,099 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/297bb89b92a94260acabf49c53a578bb is 50, key is test_row_0/A:col10/1732288926873/Put/seqid=0 2024-11-22T15:22:07,106 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed6f777bba2efed5f759348895e3133f#B#compaction#29 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:07,107 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/93bd2e34f615459d936779c178f1d802 is 50, key is test_row_0/B:col10/1732288926873/Put/seqid=0 2024-11-22T15:22:07,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741866_1042 (size=12151) 2024-11-22T15:22:07,125 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/a057db3163ea451e96e6a3911c58f025 2024-11-22T15:22:07,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741868_1044 (size=12409) 2024-11-22T15:22:07,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741867_1043 (size=12409) 2024-11-22T15:22:07,161 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/283aa55ae06843ddae0ccd096edf15f8 is 50, key is test_row_0/B:col10/1732288927036/Put/seqid=0 2024-11-22T15:22:07,164 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/93bd2e34f615459d936779c178f1d802 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/93bd2e34f615459d936779c178f1d802 2024-11-22T15:22:07,167 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/297bb89b92a94260acabf49c53a578bb as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/297bb89b92a94260acabf49c53a578bb 2024-11-22T15:22:07,184 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed6f777bba2efed5f759348895e3133f/A of ed6f777bba2efed5f759348895e3133f into 297bb89b92a94260acabf49c53a578bb(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:22:07,185 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:07,185 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., storeName=ed6f777bba2efed5f759348895e3133f/A, priority=13, startTime=1732288927038; duration=0sec 2024-11-22T15:22:07,185 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:22:07,187 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed6f777bba2efed5f759348895e3133f/B of ed6f777bba2efed5f759348895e3133f into 93bd2e34f615459d936779c178f1d802(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:22:07,187 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:07,187 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., storeName=ed6f777bba2efed5f759348895e3133f/B, priority=13, startTime=1732288927038; duration=0sec 2024-11-22T15:22:07,187 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed6f777bba2efed5f759348895e3133f:A 2024-11-22T15:22:07,187 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:07,187 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:07,187 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed6f777bba2efed5f759348895e3133f:B 2024-11-22T15:22:07,189 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:07,189 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): ed6f777bba2efed5f759348895e3133f/C is initiating minor compaction (all files) 2024-11-22T15:22:07,190 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed6f777bba2efed5f759348895e3133f/C in TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
2024-11-22T15:22:07,190 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/17ccf7de0d2c4ef293827c498d833d8b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/0a9597a5f64e4794b41cae09cef188f5, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/31489670f329482e9900072930dda420] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp, totalSize=35.5 K 2024-11-22T15:22:07,191 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 17ccf7de0d2c4ef293827c498d833d8b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732288924814 2024-11-22T15:22:07,192 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0a9597a5f64e4794b41cae09cef188f5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1732288925899 2024-11-22T15:22:07,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741869_1045 (size=12151) 2024-11-22T15:22:07,194 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 31489670f329482e9900072930dda420, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732288926558 2024-11-22T15:22:07,195 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/283aa55ae06843ddae0ccd096edf15f8 2024-11-22T15:22:07,198 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:07,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732288987192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:07,200 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:07,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732288987193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:07,218 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/75e977984a3643f587d021dd04fc6fb6 is 50, key is test_row_0/C:col10/1732288927036/Put/seqid=0 2024-11-22T15:22:07,221 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed6f777bba2efed5f759348895e3133f#C#compaction#32 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:07,222 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/2ffdfabab6aa4721b5780142ed8aa825 is 50, key is test_row_0/C:col10/1732288926873/Put/seqid=0 2024-11-22T15:22:07,232 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:07,233 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-22T15:22:07,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:07,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:07,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:07,233 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:07,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:07,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:07,234 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:07,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288987234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:07,235 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:07,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288987234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:07,238 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:07,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288987238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:07,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741871_1047 (size=12409) 2024-11-22T15:22:07,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741870_1046 (size=12151) 2024-11-22T15:22:07,277 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/2ffdfabab6aa4721b5780142ed8aa825 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/2ffdfabab6aa4721b5780142ed8aa825 2024-11-22T15:22:07,289 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed6f777bba2efed5f759348895e3133f/C of ed6f777bba2efed5f759348895e3133f into 2ffdfabab6aa4721b5780142ed8aa825(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:22:07,289 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:07,289 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., storeName=ed6f777bba2efed5f759348895e3133f/C, priority=13, startTime=1732288927038; duration=0sec 2024-11-22T15:22:07,289 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:07,289 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed6f777bba2efed5f759348895e3133f:C 2024-11-22T15:22:07,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-22T15:22:07,387 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:07,388 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-22T15:22:07,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:07,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:07,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:07,389 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:22:07,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:07,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:07,402 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:07,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732288987401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:07,404 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:07,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732288987403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:07,541 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:07,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288987539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:07,542 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:07,542 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:07,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288987540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:07,542 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-22T15:22:07,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:07,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:07,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:07,543 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:07,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:07,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:07,546 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:07,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288987542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:07,673 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/75e977984a3643f587d021dd04fc6fb6 2024-11-22T15:22:07,681 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/a057db3163ea451e96e6a3911c58f025 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/a057db3163ea451e96e6a3911c58f025 2024-11-22T15:22:07,690 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/a057db3163ea451e96e6a3911c58f025, entries=150, sequenceid=154, filesize=11.9 K 2024-11-22T15:22:07,692 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/283aa55ae06843ddae0ccd096edf15f8 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/283aa55ae06843ddae0ccd096edf15f8 2024-11-22T15:22:07,696 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:07,697 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-22T15:22:07,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:07,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:07,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:07,698 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:07,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:07,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:22:07,704 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/283aa55ae06843ddae0ccd096edf15f8, entries=150, sequenceid=154, filesize=11.9 K 2024-11-22T15:22:07,705 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:07,706 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/75e977984a3643f587d021dd04fc6fb6 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/75e977984a3643f587d021dd04fc6fb6 2024-11-22T15:22:07,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732288987705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:07,719 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:07,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732288987716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:07,721 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/75e977984a3643f587d021dd04fc6fb6, entries=150, sequenceid=154, filesize=11.9 K 2024-11-22T15:22:07,742 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for ed6f777bba2efed5f759348895e3133f in 685ms, sequenceid=154, compaction requested=false 2024-11-22T15:22:07,743 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:07,852 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:07,853 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-22T15:22:07,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
2024-11-22T15:22:07,854 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing ed6f777bba2efed5f759348895e3133f 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-22T15:22:07,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=A 2024-11-22T15:22:07,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:07,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=B 2024-11-22T15:22:07,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:07,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=C 2024-11-22T15:22:07,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:07,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/66ef8bb51deb4eb8814d4068d4440543 is 50, key is test_row_0/A:col10/1732288927079/Put/seqid=0 2024-11-22T15:22:07,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-22T15:22:07,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741872_1048 (size=12151) 2024-11-22T15:22:07,914 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/66ef8bb51deb4eb8814d4068d4440543 2024-11-22T15:22:07,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/df711b9f31c34b2382ee8f56095dda26 is 50, key is test_row_0/B:col10/1732288927079/Put/seqid=0 2024-11-22T15:22:07,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741873_1049 (size=12151) 2024-11-22T15:22:08,064 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
as already flushing 2024-11-22T15:22:08,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed6f777bba2efed5f759348895e3133f 2024-11-22T15:22:08,143 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:08,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288988133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:08,143 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:08,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288988130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:08,144 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:08,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288988138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:08,215 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:08,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732288988209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:08,227 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:08,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732288988225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:08,250 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:08,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288988246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:08,251 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:08,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288988247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:08,251 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:08,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288988247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:08,384 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/df711b9f31c34b2382ee8f56095dda26 2024-11-22T15:22:08,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/f598c33e5c35448aa383b1f8396a2e3f is 50, key is test_row_0/C:col10/1732288927079/Put/seqid=0 2024-11-22T15:22:08,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741874_1050 (size=12151) 2024-11-22T15:22:08,438 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/f598c33e5c35448aa383b1f8396a2e3f 2024-11-22T15:22:08,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/66ef8bb51deb4eb8814d4068d4440543 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/66ef8bb51deb4eb8814d4068d4440543 2024-11-22T15:22:08,456 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:08,457 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:08,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288988453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:08,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288988453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:08,461 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:08,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288988457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:08,467 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/66ef8bb51deb4eb8814d4068d4440543, entries=150, sequenceid=172, filesize=11.9 K 2024-11-22T15:22:08,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/df711b9f31c34b2382ee8f56095dda26 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/df711b9f31c34b2382ee8f56095dda26 2024-11-22T15:22:08,480 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/df711b9f31c34b2382ee8f56095dda26, entries=150, sequenceid=172, filesize=11.9 K 2024-11-22T15:22:08,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/f598c33e5c35448aa383b1f8396a2e3f as 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/f598c33e5c35448aa383b1f8396a2e3f 2024-11-22T15:22:08,492 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/f598c33e5c35448aa383b1f8396a2e3f, entries=150, sequenceid=172, filesize=11.9 K 2024-11-22T15:22:08,496 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for ed6f777bba2efed5f759348895e3133f in 641ms, sequenceid=172, compaction requested=true 2024-11-22T15:22:08,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:08,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:08,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-11-22T15:22:08,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-11-22T15:22:08,507 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-11-22T15:22:08,507 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7300 sec 2024-11-22T15:22:08,511 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 1.7460 sec 2024-11-22T15:22:08,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed6f777bba2efed5f759348895e3133f 2024-11-22T15:22:08,766 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed6f777bba2efed5f759348895e3133f 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-22T15:22:08,771 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=A 2024-11-22T15:22:08,771 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:08,771 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=B 2024-11-22T15:22:08,771 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:08,771 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=C 2024-11-22T15:22:08,772 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:08,789 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/70af24b7693f45f689e559b522efbc11 is 50, key is test_row_0/A:col10/1732288928765/Put/seqid=0 2024-11-22T15:22:08,805 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:08,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288988800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:08,806 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:08,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288988803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:08,809 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:08,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288988805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:08,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741875_1051 (size=12151) 2024-11-22T15:22:08,816 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/70af24b7693f45f689e559b522efbc11 2024-11-22T15:22:08,838 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/2d499b2ad3dd45a39a38c5fdfc50be55 is 50, key is test_row_0/B:col10/1732288928765/Put/seqid=0 2024-11-22T15:22:08,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741876_1052 (size=12151) 2024-11-22T15:22:08,855 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/2d499b2ad3dd45a39a38c5fdfc50be55 2024-11-22T15:22:08,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-22T15:22:08,874 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-11-22T15:22:08,876 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/35abebe13b824482bc058ac2dba5884e is 50, key is test_row_0/C:col10/1732288928765/Put/seqid=0 2024-11-22T15:22:08,878 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:22:08,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-11-22T15:22:08,883 INFO [PEWorker-2 {}] 
procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:22:08,884 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:22:08,885 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:22:08,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-22T15:22:08,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741877_1053 (size=12151) 2024-11-22T15:22:08,903 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/35abebe13b824482bc058ac2dba5884e 2024-11-22T15:22:08,910 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:08,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288988908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:08,911 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:08,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288988908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:08,914 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:08,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288988913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:08,916 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/70af24b7693f45f689e559b522efbc11 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/70af24b7693f45f689e559b522efbc11 2024-11-22T15:22:08,926 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/70af24b7693f45f689e559b522efbc11, entries=150, sequenceid=196, filesize=11.9 K 2024-11-22T15:22:08,928 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/2d499b2ad3dd45a39a38c5fdfc50be55 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/2d499b2ad3dd45a39a38c5fdfc50be55 2024-11-22T15:22:08,938 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/2d499b2ad3dd45a39a38c5fdfc50be55, entries=150, sequenceid=196, filesize=11.9 K 2024-11-22T15:22:08,940 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/35abebe13b824482bc058ac2dba5884e as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/35abebe13b824482bc058ac2dba5884e 2024-11-22T15:22:08,953 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/35abebe13b824482bc058ac2dba5884e, entries=150, sequenceid=196, filesize=11.9 K 2024-11-22T15:22:08,956 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 
KB/68700 for ed6f777bba2efed5f759348895e3133f in 191ms, sequenceid=196, compaction requested=true 2024-11-22T15:22:08,956 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:08,957 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T15:22:08,957 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed6f777bba2efed5f759348895e3133f:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:22:08,957 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:08,957 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T15:22:08,957 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed6f777bba2efed5f759348895e3133f:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:22:08,958 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:08,958 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed6f777bba2efed5f759348895e3133f:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:22:08,958 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:22:08,960 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48862 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T15:22:08,960 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): ed6f777bba2efed5f759348895e3133f/A is initiating minor compaction (all files) 2024-11-22T15:22:08,961 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed6f777bba2efed5f759348895e3133f/A in TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
2024-11-22T15:22:08,961 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/297bb89b92a94260acabf49c53a578bb, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/a057db3163ea451e96e6a3911c58f025, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/66ef8bb51deb4eb8814d4068d4440543, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/70af24b7693f45f689e559b522efbc11] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp, totalSize=47.7 K 2024-11-22T15:22:08,962 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48862 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T15:22:08,962 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): ed6f777bba2efed5f759348895e3133f/B is initiating minor compaction (all files) 2024-11-22T15:22:08,962 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed6f777bba2efed5f759348895e3133f/B in TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:08,963 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/93bd2e34f615459d936779c178f1d802, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/283aa55ae06843ddae0ccd096edf15f8, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/df711b9f31c34b2382ee8f56095dda26, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/2d499b2ad3dd45a39a38c5fdfc50be55] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp, totalSize=47.7 K 2024-11-22T15:22:08,964 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 297bb89b92a94260acabf49c53a578bb, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732288926558 2024-11-22T15:22:08,965 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 93bd2e34f615459d936779c178f1d802, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732288926558 2024-11-22T15:22:08,966 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 283aa55ae06843ddae0ccd096edf15f8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, 
earliestPutTs=1732288926918 2024-11-22T15:22:08,966 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting a057db3163ea451e96e6a3911c58f025, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1732288926918 2024-11-22T15:22:08,967 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting df711b9f31c34b2382ee8f56095dda26, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732288927075 2024-11-22T15:22:08,967 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 66ef8bb51deb4eb8814d4068d4440543, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732288927075 2024-11-22T15:22:08,968 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 2d499b2ad3dd45a39a38c5fdfc50be55, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732288928119 2024-11-22T15:22:08,968 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 70af24b7693f45f689e559b522efbc11, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732288928119 2024-11-22T15:22:08,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-22T15:22:08,993 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed6f777bba2efed5f759348895e3133f#B#compaction#39 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:08,994 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/769b27334df34d588cacc54ad33eaeb6 is 50, key is test_row_0/B:col10/1732288928765/Put/seqid=0 2024-11-22T15:22:08,995 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed6f777bba2efed5f759348895e3133f#A#compaction#40 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:08,997 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/ebaf0a80a5834c82a08fece47eec3c3f is 50, key is test_row_0/A:col10/1732288928765/Put/seqid=0 2024-11-22T15:22:09,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741879_1055 (size=12595) 2024-11-22T15:22:09,033 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/769b27334df34d588cacc54ad33eaeb6 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/769b27334df34d588cacc54ad33eaeb6 2024-11-22T15:22:09,039 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:09,039 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-22T15:22:09,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:09,040 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing ed6f777bba2efed5f759348895e3133f 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-22T15:22:09,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=A 2024-11-22T15:22:09,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:09,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=B 2024-11-22T15:22:09,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:09,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=C 2024-11-22T15:22:09,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:09,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741878_1054 (size=12595) 2024-11-22T15:22:09,047 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] 
regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ed6f777bba2efed5f759348895e3133f/B of ed6f777bba2efed5f759348895e3133f into 769b27334df34d588cacc54ad33eaeb6(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:22:09,047 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:09,047 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., storeName=ed6f777bba2efed5f759348895e3133f/B, priority=12, startTime=1732288928957; duration=0sec 2024-11-22T15:22:09,047 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:22:09,048 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed6f777bba2efed5f759348895e3133f:B 2024-11-22T15:22:09,048 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T15:22:09,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/3ce7428c7e61462d8f474f64b4aac111 is 50, key is test_row_0/A:col10/1732288928802/Put/seqid=0 2024-11-22T15:22:09,050 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48862 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T15:22:09,051 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): ed6f777bba2efed5f759348895e3133f/C is initiating minor compaction (all files) 2024-11-22T15:22:09,051 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed6f777bba2efed5f759348895e3133f/C in TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
2024-11-22T15:22:09,051 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/2ffdfabab6aa4721b5780142ed8aa825, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/75e977984a3643f587d021dd04fc6fb6, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/f598c33e5c35448aa383b1f8396a2e3f, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/35abebe13b824482bc058ac2dba5884e] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp, totalSize=47.7 K 2024-11-22T15:22:09,052 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 2ffdfabab6aa4721b5780142ed8aa825, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732288926558 2024-11-22T15:22:09,052 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 75e977984a3643f587d021dd04fc6fb6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1732288926918 2024-11-22T15:22:09,054 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting f598c33e5c35448aa383b1f8396a2e3f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732288927075 2024-11-22T15:22:09,055 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 35abebe13b824482bc058ac2dba5884e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732288928119 2024-11-22T15:22:09,070 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/ebaf0a80a5834c82a08fece47eec3c3f as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/ebaf0a80a5834c82a08fece47eec3c3f 2024-11-22T15:22:09,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741880_1056 (size=12151) 2024-11-22T15:22:09,082 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/3ce7428c7e61462d8f474f64b4aac111 2024-11-22T15:22:09,083 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ed6f777bba2efed5f759348895e3133f/A of ed6f777bba2efed5f759348895e3133f into ebaf0a80a5834c82a08fece47eec3c3f(size=12.3 K), total size for store is 12.3 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:22:09,083 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:09,083 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., storeName=ed6f777bba2efed5f759348895e3133f/A, priority=12, startTime=1732288928956; duration=0sec 2024-11-22T15:22:09,083 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:09,083 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed6f777bba2efed5f759348895e3133f:A 2024-11-22T15:22:09,088 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed6f777bba2efed5f759348895e3133f#C#compaction#42 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:09,088 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/95a1b12cce754aef97c916275114d38d is 50, key is test_row_0/C:col10/1732288928765/Put/seqid=0 2024-11-22T15:22:09,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/192bd902f93e4095a69493335c9fb492 is 50, key is test_row_0/B:col10/1732288928802/Put/seqid=0 2024-11-22T15:22:09,116 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:09,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed6f777bba2efed5f759348895e3133f 2024-11-22T15:22:09,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741881_1057 (size=12595) 2024-11-22T15:22:09,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741882_1058 (size=12151) 2024-11-22T15:22:09,158 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/192bd902f93e4095a69493335c9fb492 2024-11-22T15:22:09,173 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:09,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288989168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:09,173 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:09,173 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:09,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288989169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:09,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288989171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:09,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/98ca54e3390b4d45ad31a131c6a26111 is 50, key is test_row_0/C:col10/1732288928802/Put/seqid=0 2024-11-22T15:22:09,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-22T15:22:09,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741883_1059 (size=12151) 2024-11-22T15:22:09,223 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/98ca54e3390b4d45ad31a131c6a26111 2024-11-22T15:22:09,230 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:09,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732288989225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:09,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/3ce7428c7e61462d8f474f64b4aac111 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/3ce7428c7e61462d8f474f64b4aac111 2024-11-22T15:22:09,240 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:09,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732288989237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:09,243 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/3ce7428c7e61462d8f474f64b4aac111, entries=150, sequenceid=209, filesize=11.9 K 2024-11-22T15:22:09,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/192bd902f93e4095a69493335c9fb492 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/192bd902f93e4095a69493335c9fb492 2024-11-22T15:22:09,257 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/192bd902f93e4095a69493335c9fb492, entries=150, sequenceid=209, filesize=11.9 K 2024-11-22T15:22:09,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/98ca54e3390b4d45ad31a131c6a26111 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/98ca54e3390b4d45ad31a131c6a26111 2024-11-22T15:22:09,272 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/98ca54e3390b4d45ad31a131c6a26111, entries=150, sequenceid=209, filesize=11.9 K 2024-11-22T15:22:09,275 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for ed6f777bba2efed5f759348895e3133f in 235ms, sequenceid=209, 
compaction requested=false 2024-11-22T15:22:09,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:09,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:09,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-11-22T15:22:09,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-11-22T15:22:09,284 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-11-22T15:22:09,285 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 394 msec 2024-11-22T15:22:09,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed6f777bba2efed5f759348895e3133f 2024-11-22T15:22:09,287 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed6f777bba2efed5f759348895e3133f 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-22T15:22:09,288 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 408 msec 2024-11-22T15:22:09,295 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=A 2024-11-22T15:22:09,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:09,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=B 2024-11-22T15:22:09,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:09,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=C 2024-11-22T15:22:09,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:09,321 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/d337a0c7c1c54cdea4d4e5db413831b4 is 50, key is test_row_0/A:col10/1732288929151/Put/seqid=0 2024-11-22T15:22:09,323 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:09,323 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:09,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288989318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:09,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288989318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:09,328 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:09,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288989323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:09,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741884_1060 (size=12151) 2024-11-22T15:22:09,439 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:09,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288989432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:09,440 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:09,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288989432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:09,441 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:09,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288989432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:09,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-22T15:22:09,491 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed 2024-11-22T15:22:09,494 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:22:09,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees 2024-11-22T15:22:09,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-22T15:22:09,496 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:22:09,497 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:22:09,498 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:22:09,539 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/95a1b12cce754aef97c916275114d38d as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/95a1b12cce754aef97c916275114d38d 
2024-11-22T15:22:09,549 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ed6f777bba2efed5f759348895e3133f/C of ed6f777bba2efed5f759348895e3133f into 95a1b12cce754aef97c916275114d38d(size=12.3 K), total size for store is 24.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:22:09,549 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:09,549 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., storeName=ed6f777bba2efed5f759348895e3133f/C, priority=12, startTime=1732288928958; duration=0sec 2024-11-22T15:22:09,549 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:09,549 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed6f777bba2efed5f759348895e3133f:C 2024-11-22T15:22:09,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-22T15:22:09,647 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:09,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288989642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:09,652 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:09,652 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:09,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288989645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:09,653 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-22T15:22:09,652 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:09,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:09,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288989649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:09,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:09,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:09,653 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:09,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:09,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:09,773 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/d337a0c7c1c54cdea4d4e5db413831b4 2024-11-22T15:22:09,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-22T15:22:09,801 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/505bde60d08f43a6a747b714b2827dba is 50, key is test_row_0/B:col10/1732288929151/Put/seqid=0 2024-11-22T15:22:09,836 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:09,836 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-22T15:22:09,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:09,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:09,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:09,837 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:09,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:09,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:09,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741885_1061 (size=12151) 2024-11-22T15:22:09,953 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:09,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288989951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:09,959 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:09,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288989956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:09,961 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:09,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288989957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:09,990 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:09,991 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-22T15:22:09,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:09,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:09,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:09,991 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
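The RegionTooBusyException entries above ("Over memstore limit=512.0 K") come from HRegion.checkResources, which blocks writes once a region's memstore passes its blocking threshold: the configured memstore flush size multiplied by the block multiplier. A minimal sketch of the two settings involved follows; the concrete values are assumptions chosen so the product matches the 512 K limit seen in this run (the test presumably lowers the flush size far below the 128 MB production default), and only the property names are standard HBase keys.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingConfig {
  // Returns a configuration whose memstore blocking threshold is 512 KB,
  // i.e. flush.size (128 KB, assumed test value) * block.multiplier (4).
  public static Configuration lowMemstoreThreshold() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}

The reject-then-flush cycle driven by this threshold repeats throughout this stretch of the log.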
2024-11-22T15:22:09,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:09,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:10,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-22T15:22:10,146 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:10,147 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-22T15:22:10,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:10,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:10,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:10,147 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:10,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:10,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
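Each rejected Mutate above is returned to the caller as a RegionTooBusyException (see the callId 100/101/105 entries), and the writers keep retrying until the in-flight flush frees memstore space. The HBase client normally performs such retries itself; a caller that wants explicit control can catch the exception and back off, roughly as in the sketch below. This is illustrative only, not code from the test: the table, row, and column names mirror the log, but the retry policy is an assumption.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
  public static void putWithBackoff(Configuration conf) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 50;
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          return; // write accepted
        } catch (IOException busy) {
          // RegionTooBusyException (possibly wrapped by the client's own retry
          // layer) means the memstore is over its blocking limit; wait and retry.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
      throw new IOException("region still too busy after retries");
    }
  }
}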
2024-11-22T15:22:10,264 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/505bde60d08f43a6a747b714b2827dba 2024-11-22T15:22:10,302 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:10,302 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-22T15:22:10,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:10,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:10,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:10,303 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:10,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:10,304 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/360a1816a8ae4e80b44bc4ae74741d7d is 50, key is test_row_0/C:col10/1732288929151/Put/seqid=0 2024-11-22T15:22:10,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
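The repeated pid=23 failures above are a flush procedure retry loop: the master's FlushTableProcedure (pid=22) dispatches a FlushRegionProcedure (pid=23) to the region server, the region reports "NOT flushing ... as already flushing", the callable fails with IOException, and the master re-dispatches until the in-flight flush completes. A table flush that runs through these procedures can be requested from the Admin API, roughly as below; whether this test drives the flush through Admin or an internal test utility is an assumption.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; on masters that
      // support it, this runs as a FlushTableProcedure with one
      // FlushRegionProcedure per region, matching the pid=22 / pid=23 entries
      // in this log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}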
2024-11-22T15:22:10,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741886_1062 (size=12151) 2024-11-22T15:22:10,369 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/360a1816a8ae4e80b44bc4ae74741d7d 2024-11-22T15:22:10,379 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/d337a0c7c1c54cdea4d4e5db413831b4 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/d337a0c7c1c54cdea4d4e5db413831b4 2024-11-22T15:22:10,388 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/d337a0c7c1c54cdea4d4e5db413831b4, entries=150, sequenceid=237, filesize=11.9 K 2024-11-22T15:22:10,390 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/505bde60d08f43a6a747b714b2827dba as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/505bde60d08f43a6a747b714b2827dba 2024-11-22T15:22:10,401 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/505bde60d08f43a6a747b714b2827dba, entries=150, sequenceid=237, filesize=11.9 K 2024-11-22T15:22:10,402 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/360a1816a8ae4e80b44bc4ae74741d7d as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/360a1816a8ae4e80b44bc4ae74741d7d 2024-11-22T15:22:10,414 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/360a1816a8ae4e80b44bc4ae74741d7d, entries=150, sequenceid=237, filesize=11.9 K 2024-11-22T15:22:10,416 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for ed6f777bba2efed5f759348895e3133f in 1129ms, sequenceid=237, compaction requested=true 2024-11-22T15:22:10,416 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:10,417 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed6f777bba2efed5f759348895e3133f:A, priority=-2147483648, current under compaction 
store size is 1 2024-11-22T15:22:10,417 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:10,417 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:10,417 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed6f777bba2efed5f759348895e3133f:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:22:10,417 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:10,417 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:10,417 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed6f777bba2efed5f759348895e3133f:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:22:10,417 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:22:10,419 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:10,419 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): ed6f777bba2efed5f759348895e3133f/B is initiating minor compaction (all files) 2024-11-22T15:22:10,419 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed6f777bba2efed5f759348895e3133f/B in TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
2024-11-22T15:22:10,419 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/769b27334df34d588cacc54ad33eaeb6, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/192bd902f93e4095a69493335c9fb492, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/505bde60d08f43a6a747b714b2827dba] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp, totalSize=36.0 K 2024-11-22T15:22:10,420 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:10,420 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): ed6f777bba2efed5f759348895e3133f/A is initiating minor compaction (all files) 2024-11-22T15:22:10,420 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 769b27334df34d588cacc54ad33eaeb6, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732288928119 2024-11-22T15:22:10,420 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed6f777bba2efed5f759348895e3133f/A in TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
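The two compaction threads above each selected 3 store files totalling 36,897 bytes with the exploring policy. Size-based selection in HBase is, roughly, a ratio test: a file stays in the candidate set only if it is not much larger than the other candidates combined, scaled by hbase.hstore.compaction.ratio (1.2 by default). The snippet below is a simplified stand-in for that check, not the actual policy code, using the three B-store file sizes reported in this log (12.3 K + 11.9 K + 11.9 K).

public class RatioCheckExample {
  // Simplified version of the size-ratio test used by HBase's ratio-based
  // compaction policies: a file qualifies if it is no larger than the combined
  // size of the other candidates multiplied by the configured ratio.
  static boolean fitsRatio(long[] fileSizes, int index, double ratio) {
    long others = 0;
    for (int i = 0; i < fileSizes.length; i++) {
      if (i != index) {
        others += fileSizes[i];
      }
    }
    return fileSizes[index] <= (long) (others * ratio);
  }

  public static void main(String[] args) {
    long[] sizes = {12595L, 12151L, 12151L}; // the 3 selected files, 36,897 bytes total
    for (int i = 0; i < sizes.length; i++) {
      System.out.println("file " + i + " fits ratio 1.2: " + fitsRatio(sizes, i, 1.2));
    }
  }
}

All three files pass the check, so the whole set is compacted, consistent with the "3 files of size 36897" selection logged for both the A and B stores.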
2024-11-22T15:22:10,420 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/ebaf0a80a5834c82a08fece47eec3c3f, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/3ce7428c7e61462d8f474f64b4aac111, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/d337a0c7c1c54cdea4d4e5db413831b4] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp, totalSize=36.0 K 2024-11-22T15:22:10,421 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 192bd902f93e4095a69493335c9fb492, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732288928785 2024-11-22T15:22:10,422 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 505bde60d08f43a6a747b714b2827dba, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732288929151 2024-11-22T15:22:10,422 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting ebaf0a80a5834c82a08fece47eec3c3f, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732288928119 2024-11-22T15:22:10,422 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3ce7428c7e61462d8f474f64b4aac111, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732288928785 2024-11-22T15:22:10,424 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting d337a0c7c1c54cdea4d4e5db413831b4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732288929151 2024-11-22T15:22:10,440 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed6f777bba2efed5f759348895e3133f#B#compaction#48 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:10,441 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/6c0c4b9276004d0e91244bf4774cd00f is 50, key is test_row_0/B:col10/1732288929151/Put/seqid=0 2024-11-22T15:22:10,453 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed6f777bba2efed5f759348895e3133f#A#compaction#49 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:10,454 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/aa6684aea1924baa9abd37bded5897f1 is 50, key is test_row_0/A:col10/1732288929151/Put/seqid=0 2024-11-22T15:22:10,462 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:10,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed6f777bba2efed5f759348895e3133f 2024-11-22T15:22:10,463 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-22T15:22:10,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:10,463 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing ed6f777bba2efed5f759348895e3133f 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-22T15:22:10,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=A 2024-11-22T15:22:10,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:10,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=B 2024-11-22T15:22:10,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:10,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=C 2024-11-22T15:22:10,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:10,465 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
as already flushing 2024-11-22T15:22:10,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/187691589fc34abca095072a71171138 is 50, key is test_row_0/A:col10/1732288930461/Put/seqid=0 2024-11-22T15:22:10,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741888_1064 (size=12697) 2024-11-22T15:22:10,514 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/aa6684aea1924baa9abd37bded5897f1 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/aa6684aea1924baa9abd37bded5897f1 2024-11-22T15:22:10,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741887_1063 (size=12697) 2024-11-22T15:22:10,521 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:10,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288990515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:10,522 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:10,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288990517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:10,526 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:10,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288990523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:10,529 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed6f777bba2efed5f759348895e3133f/A of ed6f777bba2efed5f759348895e3133f into aa6684aea1924baa9abd37bded5897f1(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:22:10,529 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:10,529 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., storeName=ed6f777bba2efed5f759348895e3133f/A, priority=13, startTime=1732288930417; duration=0sec 2024-11-22T15:22:10,530 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:22:10,530 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed6f777bba2efed5f759348895e3133f:A 2024-11-22T15:22:10,530 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:10,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741889_1065 (size=14541) 2024-11-22T15:22:10,534 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:10,535 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): ed6f777bba2efed5f759348895e3133f/C is initiating minor compaction (all files) 2024-11-22T15:22:10,535 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed6f777bba2efed5f759348895e3133f/C in TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
2024-11-22T15:22:10,535 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/95a1b12cce754aef97c916275114d38d, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/98ca54e3390b4d45ad31a131c6a26111, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/360a1816a8ae4e80b44bc4ae74741d7d] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp, totalSize=36.0 K 2024-11-22T15:22:10,535 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/187691589fc34abca095072a71171138 2024-11-22T15:22:10,535 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 95a1b12cce754aef97c916275114d38d, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732288928119 2024-11-22T15:22:10,540 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 98ca54e3390b4d45ad31a131c6a26111, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732288928785 2024-11-22T15:22:10,543 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 360a1816a8ae4e80b44bc4ae74741d7d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732288929151 2024-11-22T15:22:10,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/c0d4a9d1317c4844afde506b9d6ba840 is 50, key is test_row_0/B:col10/1732288930461/Put/seqid=0 2024-11-22T15:22:10,570 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed6f777bba2efed5f759348895e3133f#C#compaction#52 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:10,571 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/b16ae5ecc0994ee69d2bfc871249b24c is 50, key is test_row_0/C:col10/1732288929151/Put/seqid=0 2024-11-22T15:22:10,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741890_1066 (size=12151) 2024-11-22T15:22:10,588 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/c0d4a9d1317c4844afde506b9d6ba840 2024-11-22T15:22:10,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-22T15:22:10,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741891_1067 (size=12697) 2024-11-22T15:22:10,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/fe9e5cd44320474ca492cb4b05845a5c is 50, key is test_row_0/C:col10/1732288930461/Put/seqid=0 2024-11-22T15:22:10,620 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/b16ae5ecc0994ee69d2bfc871249b24c as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/b16ae5ecc0994ee69d2bfc871249b24c 2024-11-22T15:22:10,627 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:10,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288990624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:10,628 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:10,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288990624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:10,629 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed6f777bba2efed5f759348895e3133f/C of ed6f777bba2efed5f759348895e3133f into b16ae5ecc0994ee69d2bfc871249b24c(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:22:10,629 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:10,629 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., storeName=ed6f777bba2efed5f759348895e3133f/C, priority=13, startTime=1732288930417; duration=0sec 2024-11-22T15:22:10,629 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:10,630 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed6f777bba2efed5f759348895e3133f:C 2024-11-22T15:22:10,631 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:10,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288990628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:10,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741892_1068 (size=12151) 2024-11-22T15:22:10,650 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/fe9e5cd44320474ca492cb4b05845a5c 2024-11-22T15:22:10,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/187691589fc34abca095072a71171138 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/187691589fc34abca095072a71171138 2024-11-22T15:22:10,668 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/187691589fc34abca095072a71171138, entries=200, sequenceid=249, filesize=14.2 K 2024-11-22T15:22:10,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/c0d4a9d1317c4844afde506b9d6ba840 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/c0d4a9d1317c4844afde506b9d6ba840 2024-11-22T15:22:10,679 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/c0d4a9d1317c4844afde506b9d6ba840, entries=150, sequenceid=249, filesize=11.9 K 2024-11-22T15:22:10,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, 
pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/fe9e5cd44320474ca492cb4b05845a5c as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/fe9e5cd44320474ca492cb4b05845a5c 2024-11-22T15:22:10,687 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/fe9e5cd44320474ca492cb4b05845a5c, entries=150, sequenceid=249, filesize=11.9 K 2024-11-22T15:22:10,689 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for ed6f777bba2efed5f759348895e3133f in 226ms, sequenceid=249, compaction requested=false 2024-11-22T15:22:10,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:10,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:10,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23 2024-11-22T15:22:10,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=23 2024-11-22T15:22:10,699 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22 2024-11-22T15:22:10,700 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1940 sec 2024-11-22T15:22:10,702 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 1.2070 sec 2024-11-22T15:22:10,833 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed6f777bba2efed5f759348895e3133f 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-22T15:22:10,833 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=A 2024-11-22T15:22:10,833 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:10,833 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=B 2024-11-22T15:22:10,833 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:10,833 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=C 2024-11-22T15:22:10,833 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): 
Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:10,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed6f777bba2efed5f759348895e3133f 2024-11-22T15:22:10,841 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/9589b79de635430792198f94df449008 is 50, key is test_row_0/A:col10/1732288930515/Put/seqid=0 2024-11-22T15:22:10,851 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:10,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288990848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:10,852 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:10,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288990848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:10,855 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:10,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288990850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:10,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741893_1069 (size=14741) 2024-11-22T15:22:10,877 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/9589b79de635430792198f94df449008 2024-11-22T15:22:10,895 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/15570d43181d47cfb4b99bac4b23d602 is 50, key is test_row_0/B:col10/1732288930515/Put/seqid=0 2024-11-22T15:22:10,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741894_1070 (size=12301) 2024-11-22T15:22:10,937 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/6c0c4b9276004d0e91244bf4774cd00f as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/6c0c4b9276004d0e91244bf4774cd00f 2024-11-22T15:22:10,951 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed6f777bba2efed5f759348895e3133f/B of ed6f777bba2efed5f759348895e3133f into 6c0c4b9276004d0e91244bf4774cd00f(size=12.4 K), total size for store is 24.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
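The recurring RegionTooBusyException entries above are thrown by HRegion.checkResources() once the region's pending memstore data crosses its blocking limit, which HBase derives from the configured memstore flush size multiplied by hbase.hregion.memstore.block.multiplier. The 512.0 K limit reported here indicates this test run uses a flush size far below the production default of 128 MB. The plain-Java sketch below only illustrates how the two settings combine; the 128 KB flush size and multiplier of 4 are assumptions chosen to reproduce the 512 K figure, not values read from this test's configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed values for illustration only; the actual test configuration may differ.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);   // flush trigger per region (bytes)
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);       // block updates at 4x the flush size
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 0L);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = flushSize * multiplier;                     // 524288 bytes = 512.0 K, as in the log
    System.out.println("Blocking memstore limit: " + blockingLimit + " bytes");
  }
}

Once writes push a region past that limit, puts fail fast with RegionTooBusyException until an in-flight flush (like the one completing above) drains the memstore back under the threshold.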
2024-11-22T15:22:10,951 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:10,951 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., storeName=ed6f777bba2efed5f759348895e3133f/B, priority=13, startTime=1732288930417; duration=0sec 2024-11-22T15:22:10,951 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:10,952 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed6f777bba2efed5f759348895e3133f:B 2024-11-22T15:22:10,955 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:10,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288990954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:10,961 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:10,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288990958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:10,963 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:10,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288990959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:11,160 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:11,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288991157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:11,167 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:11,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288991164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:11,168 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:11,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288991167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:11,246 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:11,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732288991246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:11,248 DEBUG [Thread-157 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4165 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., hostname=77927f992d0b,36033,1732288915809, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T15:22:11,251 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:11,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732288991249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:11,252 DEBUG [Thread-155 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4164 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., hostname=77927f992d0b,36033,1732288915809, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T15:22:11,307 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=277 (bloomFilter=true), 
to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/15570d43181d47cfb4b99bac4b23d602 2024-11-22T15:22:11,326 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/6387d87b450a4e28a52ae330ac178dd7 is 50, key is test_row_0/C:col10/1732288930515/Put/seqid=0 2024-11-22T15:22:11,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741895_1071 (size=12301) 2024-11-22T15:22:11,372 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/6387d87b450a4e28a52ae330ac178dd7 2024-11-22T15:22:11,380 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/9589b79de635430792198f94df449008 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/9589b79de635430792198f94df449008 2024-11-22T15:22:11,388 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/9589b79de635430792198f94df449008, entries=200, sequenceid=277, filesize=14.4 K 2024-11-22T15:22:11,393 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/15570d43181d47cfb4b99bac4b23d602 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/15570d43181d47cfb4b99bac4b23d602 2024-11-22T15:22:11,401 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/15570d43181d47cfb4b99bac4b23d602, entries=150, sequenceid=277, filesize=12.0 K 2024-11-22T15:22:11,405 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/6387d87b450a4e28a52ae330ac178dd7 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/6387d87b450a4e28a52ae330ac178dd7 2024-11-22T15:22:11,416 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/6387d87b450a4e28a52ae330ac178dd7, entries=150, sequenceid=277, filesize=12.0 K 2024-11-22T15:22:11,417 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for ed6f777bba2efed5f759348895e3133f in 584ms, sequenceid=277, compaction requested=true 2024-11-22T15:22:11,417 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:11,417 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:11,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed6f777bba2efed5f759348895e3133f:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:22:11,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:11,418 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:11,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed6f777bba2efed5f759348895e3133f:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:22:11,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:11,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed6f777bba2efed5f759348895e3133f:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:22:11,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:22:11,419 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 41979 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:11,419 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): ed6f777bba2efed5f759348895e3133f/A is initiating minor compaction (all files) 2024-11-22T15:22:11,419 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed6f777bba2efed5f759348895e3133f/A in TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
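The client-side "Call exception, tries=6, retries=16" entries earlier in this log are emitted by RpcRetryingCallerImpl underneath HTable.put(): the server's RegionTooBusyException is surfaced to the caller as a retriable failure, so the AcidGuaranteesTestTool writer threads keep retrying with backoff until either the memstore drains or the retry budget runs out. A minimal sketch of that call path from ordinary client code follows; the table name, family, and qualifier mirror the entries above, while the value written and the retry settings are illustrative assumptions rather than the tool's actual parameters.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryingPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Retry knobs consulted by the client's retrying caller; values are assumptions for illustration.
    conf.setInt("hbase.client.retries.number", 16);   // consistent with the retries=16 seen in the log
    conf.setLong("hbase.client.pause", 100L);         // base backoff between attempts, in ms
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_1"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("example-value"));
      // put() blocks while the retrying caller absorbs RegionTooBusyException responses;
      // the exception only reaches this code once the retry budget is exhausted.
      table.put(put);
    }
  }
}

The DEBUG lines from RpcRetryingCallerImpl(129) are this mechanism reporting each failed attempt before sleeping and retrying.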
2024-11-22T15:22:11,420 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/aa6684aea1924baa9abd37bded5897f1, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/187691589fc34abca095072a71171138, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/9589b79de635430792198f94df449008] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp, totalSize=41.0 K 2024-11-22T15:22:11,420 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:11,420 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): ed6f777bba2efed5f759348895e3133f/B is initiating minor compaction (all files) 2024-11-22T15:22:11,420 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed6f777bba2efed5f759348895e3133f/B in TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:11,420 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/6c0c4b9276004d0e91244bf4774cd00f, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/c0d4a9d1317c4844afde506b9d6ba840, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/15570d43181d47cfb4b99bac4b23d602] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp, totalSize=36.3 K 2024-11-22T15:22:11,421 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting aa6684aea1924baa9abd37bded5897f1, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732288929151 2024-11-22T15:22:11,421 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 6c0c4b9276004d0e91244bf4774cd00f, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732288929151 2024-11-22T15:22:11,421 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 187691589fc34abca095072a71171138, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732288929304 2024-11-22T15:22:11,422 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting c0d4a9d1317c4844afde506b9d6ba840, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732288929316 2024-11-22T15:22:11,422 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 9589b79de635430792198f94df449008, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732288930513 2024-11-22T15:22:11,424 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 15570d43181d47cfb4b99bac4b23d602, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732288930515 2024-11-22T15:22:11,447 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed6f777bba2efed5f759348895e3133f#B#compaction#57 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:11,448 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/0d3346771dbb454a9db1fd8ad8bd1366 is 50, key is test_row_0/B:col10/1732288930515/Put/seqid=0 2024-11-22T15:22:11,457 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed6f777bba2efed5f759348895e3133f#A#compaction#58 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:11,458 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/27e0a0d737d94b06b78543252363df5a is 50, key is test_row_0/A:col10/1732288930515/Put/seqid=0 2024-11-22T15:22:11,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed6f777bba2efed5f759348895e3133f 2024-11-22T15:22:11,469 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed6f777bba2efed5f759348895e3133f 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-22T15:22:11,469 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=A 2024-11-22T15:22:11,469 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:11,469 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=B 2024-11-22T15:22:11,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:11,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=C 2024-11-22T15:22:11,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:11,491 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/cd9f875a56604e0faa23ca9890f17d42 is 50, key is test_row_0/A:col10/1732288931466/Put/seqid=0 2024-11-22T15:22:11,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:42059 is added to blk_1073741897_1073 (size=12949) 2024-11-22T15:22:11,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741896_1072 (size=12949) 2024-11-22T15:22:11,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741898_1074 (size=14741) 2024-11-22T15:22:11,542 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/cd9f875a56604e0faa23ca9890f17d42 2024-11-22T15:22:11,543 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/0d3346771dbb454a9db1fd8ad8bd1366 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/0d3346771dbb454a9db1fd8ad8bd1366 2024-11-22T15:22:11,555 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:11,555 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:11,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288991544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:11,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288991545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:11,556 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed6f777bba2efed5f759348895e3133f/B of ed6f777bba2efed5f759348895e3133f into 0d3346771dbb454a9db1fd8ad8bd1366(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:22:11,556 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:11,556 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., storeName=ed6f777bba2efed5f759348895e3133f/B, priority=13, startTime=1732288931418; duration=0sec 2024-11-22T15:22:11,557 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:22:11,557 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed6f777bba2efed5f759348895e3133f:B 2024-11-22T15:22:11,557 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:11,562 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:11,562 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:11,562 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): ed6f777bba2efed5f759348895e3133f/C is initiating minor compaction (all files) 2024-11-22T15:22:11,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288991555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:11,563 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed6f777bba2efed5f759348895e3133f/C in TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
2024-11-22T15:22:11,563 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/b16ae5ecc0994ee69d2bfc871249b24c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/fe9e5cd44320474ca492cb4b05845a5c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/6387d87b450a4e28a52ae330ac178dd7] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp, totalSize=36.3 K 2024-11-22T15:22:11,565 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting b16ae5ecc0994ee69d2bfc871249b24c, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732288929151 2024-11-22T15:22:11,566 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting fe9e5cd44320474ca492cb4b05845a5c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732288929316 2024-11-22T15:22:11,567 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 6387d87b450a4e28a52ae330ac178dd7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732288930515 2024-11-22T15:22:11,572 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/3ea0de07dd5f42c9bf4435214445f35a is 50, key is test_row_0/B:col10/1732288931466/Put/seqid=0 2024-11-22T15:22:11,584 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed6f777bba2efed5f759348895e3133f#C#compaction#61 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:11,584 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/7dcfacb44ab04a958620c88fd35406d4 is 50, key is test_row_0/C:col10/1732288930515/Put/seqid=0 2024-11-22T15:22:11,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741899_1075 (size=12301) 2024-11-22T15:22:11,598 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/3ea0de07dd5f42c9bf4435214445f35a 2024-11-22T15:22:11,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-22T15:22:11,605 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-11-22T15:22:11,610 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:22:11,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees 2024-11-22T15:22:11,612 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:22:11,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-22T15:22:11,613 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:22:11,613 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:22:11,650 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/561a87ed7c1943d796e5a5b04d163cad is 50, key is test_row_0/C:col10/1732288931466/Put/seqid=0 2024-11-22T15:22:11,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741900_1076 (size=12949) 2024-11-22T15:22:11,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741901_1077 (size=12301) 2024-11-22T15:22:11,660 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:11,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288991657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:11,660 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:11,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288991657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:11,668 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:11,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288991664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:11,669 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/7dcfacb44ab04a958620c88fd35406d4 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/7dcfacb44ab04a958620c88fd35406d4 2024-11-22T15:22:11,677 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed6f777bba2efed5f759348895e3133f/C of ed6f777bba2efed5f759348895e3133f into 7dcfacb44ab04a958620c88fd35406d4(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:22:11,677 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:11,678 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., storeName=ed6f777bba2efed5f759348895e3133f/C, priority=13, startTime=1732288931418; duration=0sec 2024-11-22T15:22:11,678 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:11,678 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed6f777bba2efed5f759348895e3133f:C 2024-11-22T15:22:11,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-22T15:22:11,773 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:11,773 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-22T15:22:11,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
2024-11-22T15:22:11,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:11,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:11,774 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:11,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:22:11,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:11,862 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:11,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288991862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:11,865 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:11,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288991863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:11,872 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:11,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288991871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:11,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-22T15:22:11,928 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:11,931 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-22T15:22:11,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:11,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:11,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:11,931 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:11,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:11,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:11,938 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/27e0a0d737d94b06b78543252363df5a as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/27e0a0d737d94b06b78543252363df5a 2024-11-22T15:22:11,948 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed6f777bba2efed5f759348895e3133f/A of ed6f777bba2efed5f759348895e3133f into 27e0a0d737d94b06b78543252363df5a(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:22:11,948 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:11,948 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., storeName=ed6f777bba2efed5f759348895e3133f/A, priority=13, startTime=1732288931417; duration=0sec 2024-11-22T15:22:11,948 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:11,948 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed6f777bba2efed5f759348895e3133f:A 2024-11-22T15:22:12,064 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/561a87ed7c1943d796e5a5b04d163cad 2024-11-22T15:22:12,074 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/cd9f875a56604e0faa23ca9890f17d42 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/cd9f875a56604e0faa23ca9890f17d42 2024-11-22T15:22:12,083 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/cd9f875a56604e0faa23ca9890f17d42, entries=200, sequenceid=289, filesize=14.4 K 2024-11-22T15:22:12,084 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:12,085 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/3ea0de07dd5f42c9bf4435214445f35a as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/3ea0de07dd5f42c9bf4435214445f35a 2024-11-22T15:22:12,087 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-22T15:22:12,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:12,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:12,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:12,087 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:12,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:12,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:22:12,110 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/3ea0de07dd5f42c9bf4435214445f35a, entries=150, sequenceid=289, filesize=12.0 K 2024-11-22T15:22:12,118 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/561a87ed7c1943d796e5a5b04d163cad as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/561a87ed7c1943d796e5a5b04d163cad 2024-11-22T15:22:12,134 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/561a87ed7c1943d796e5a5b04d163cad, entries=150, sequenceid=289, filesize=12.0 K 2024-11-22T15:22:12,135 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for ed6f777bba2efed5f759348895e3133f in 666ms, sequenceid=289, compaction requested=false 2024-11-22T15:22:12,136 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:12,175 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed6f777bba2efed5f759348895e3133f 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-22T15:22:12,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed6f777bba2efed5f759348895e3133f 2024-11-22T15:22:12,178 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=A 2024-11-22T15:22:12,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:12,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=B 2024-11-22T15:22:12,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:12,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=C 2024-11-22T15:22:12,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:12,201 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/e1a8c41c332847b99f82c46d7eabc540 is 50, key is test_row_0/A:col10/1732288932172/Put/seqid=0 2024-11-22T15:22:12,215 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:12,215 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:12,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288992202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:12,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288992202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:12,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-22T15:22:12,219 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:12,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288992199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:12,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741902_1078 (size=12301) 2024-11-22T15:22:12,229 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=319 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/e1a8c41c332847b99f82c46d7eabc540 2024-11-22T15:22:12,244 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:12,247 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-22T15:22:12,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:12,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:12,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:12,247 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:12,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:12,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:12,265 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/ebcf409d10e246008c47a64fb8ffe19d is 50, key is test_row_0/B:col10/1732288932172/Put/seqid=0 2024-11-22T15:22:12,325 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:12,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288992321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:12,326 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:12,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288992321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:12,331 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:12,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288992326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:12,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741903_1079 (size=12301) 2024-11-22T15:22:12,400 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:12,402 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-22T15:22:12,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:12,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:12,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:12,405 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:22:12,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:12,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:12,536 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:12,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288992530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:12,539 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:12,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288992538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:12,541 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:12,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288992538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:12,558 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:12,559 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-22T15:22:12,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
2024-11-22T15:22:12,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:12,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:12,559 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:12,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:22:12,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:12,715 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:12,716 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-22T15:22:12,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:12,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
as already flushing 2024-11-22T15:22:12,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:12,716 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:12,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:12,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-22T15:22:12,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] 
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:22:12,736 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=319 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/ebcf409d10e246008c47a64fb8ffe19d 2024-11-22T15:22:12,770 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/6d0358d4f9be4a679654a2fd4f1eae53 is 50, key is test_row_0/C:col10/1732288932172/Put/seqid=0 2024-11-22T15:22:12,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741904_1080 (size=12301) 2024-11-22T15:22:12,815 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=319 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/6d0358d4f9be4a679654a2fd4f1eae53 2024-11-22T15:22:12,835 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/e1a8c41c332847b99f82c46d7eabc540 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/e1a8c41c332847b99f82c46d7eabc540 2024-11-22T15:22:12,848 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:12,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288992845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:12,848 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:12,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288992845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:12,851 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/e1a8c41c332847b99f82c46d7eabc540, entries=150, sequenceid=319, filesize=12.0 K 2024-11-22T15:22:12,859 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:12,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288992846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:12,868 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/ebcf409d10e246008c47a64fb8ffe19d as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/ebcf409d10e246008c47a64fb8ffe19d 2024-11-22T15:22:12,870 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:12,871 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-22T15:22:12,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:12,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:12,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
2024-11-22T15:22:12,871 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:12,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:12,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:12,886 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/ebcf409d10e246008c47a64fb8ffe19d, entries=150, sequenceid=319, filesize=12.0 K 2024-11-22T15:22:12,888 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/6d0358d4f9be4a679654a2fd4f1eae53 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/6d0358d4f9be4a679654a2fd4f1eae53 2024-11-22T15:22:12,897 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/6d0358d4f9be4a679654a2fd4f1eae53, entries=150, sequenceid=319, filesize=12.0 K 2024-11-22T15:22:12,898 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for ed6f777bba2efed5f759348895e3133f in 723ms, sequenceid=319, compaction requested=true 2024-11-22T15:22:12,898 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:12,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed6f777bba2efed5f759348895e3133f:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:22:12,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:12,899 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:12,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed6f777bba2efed5f759348895e3133f:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:22:12,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:22:12,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed6f777bba2efed5f759348895e3133f:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:22:12,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-22T15:22:12,899 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:12,901 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:12,901 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39991 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:12,901 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): ed6f777bba2efed5f759348895e3133f/B is initiating minor compaction (all files) 2024-11-22T15:22:12,901 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): ed6f777bba2efed5f759348895e3133f/A is initiating minor compaction (all files) 2024-11-22T15:22:12,901 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed6f777bba2efed5f759348895e3133f/B in TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:12,901 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed6f777bba2efed5f759348895e3133f/A in TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
2024-11-22T15:22:12,902 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/0d3346771dbb454a9db1fd8ad8bd1366, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/3ea0de07dd5f42c9bf4435214445f35a, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/ebcf409d10e246008c47a64fb8ffe19d] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp, totalSize=36.7 K 2024-11-22T15:22:12,902 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/27e0a0d737d94b06b78543252363df5a, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/cd9f875a56604e0faa23ca9890f17d42, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/e1a8c41c332847b99f82c46d7eabc540] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp, totalSize=39.1 K 2024-11-22T15:22:12,902 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 27e0a0d737d94b06b78543252363df5a, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732288930515 2024-11-22T15:22:12,902 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0d3346771dbb454a9db1fd8ad8bd1366, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732288930515 2024-11-22T15:22:12,903 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3ea0de07dd5f42c9bf4435214445f35a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1732288930842 2024-11-22T15:22:12,903 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting cd9f875a56604e0faa23ca9890f17d42, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1732288930842 2024-11-22T15:22:12,903 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting ebcf409d10e246008c47a64fb8ffe19d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=319, earliestPutTs=1732288931534 2024-11-22T15:22:12,903 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting e1a8c41c332847b99f82c46d7eabc540, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=319, earliestPutTs=1732288931534 2024-11-22T15:22:12,926 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed6f777bba2efed5f759348895e3133f#B#compaction#66 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:12,927 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/c75dd94ec3e84e7bbb10ad008eadcc24 is 50, key is test_row_0/B:col10/1732288932172/Put/seqid=0 2024-11-22T15:22:12,938 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed6f777bba2efed5f759348895e3133f#A#compaction#67 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:12,939 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/26a518ac9f8f417080875fa31d9e8ab4 is 50, key is test_row_0/A:col10/1732288932172/Put/seqid=0 2024-11-22T15:22:12,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741905_1081 (size=13051) 2024-11-22T15:22:13,005 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/c75dd94ec3e84e7bbb10ad008eadcc24 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/c75dd94ec3e84e7bbb10ad008eadcc24 2024-11-22T15:22:13,013 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed6f777bba2efed5f759348895e3133f/B of ed6f777bba2efed5f759348895e3133f into c75dd94ec3e84e7bbb10ad008eadcc24(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
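[Editor's note: the "Exploring compaction algorithm has selected 3 files ... with 1 in ratio" records above show size-ratio based file selection. The following standalone Java sketch illustrates the general idea only; it is NOT the actual org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, and the file sizes, ratio and file-count bounds are illustrative assumptions.]

import java.util.ArrayList;
import java.util.List;

public class CompactionSelectionSketch {

    // Returns {startIndex, length} of the best contiguous window, or null if none qualifies.
    static int[] select(long[] sizes, int minFiles, int maxFiles, double ratio) {
        int[] best = null;
        long bestSize = Long.MAX_VALUE;
        for (int start = 0; start < sizes.length; start++) {
            for (int len = minFiles; len <= maxFiles && start + len <= sizes.length; len++) {
                long total = 0;
                for (int i = start; i < start + len; i++) total += sizes[i];
                // "in ratio": every file must be no larger than ratio * (sum of the other files).
                boolean inRatio = true;
                for (int i = start; i < start + len; i++) {
                    if (sizes[i] > (total - sizes[i]) * ratio) { inRatio = false; break; }
                }
                if (!inRatio) continue;
                // Prefer windows with more files, then the smaller total rewrite cost.
                if (best == null || len > best[1] || (len == best[1] && total < bestSize)) {
                    best = new int[] { start, len };
                    bestSize = total;
                }
            }
        }
        return best;
    }

    public static void main(String[] args) {
        long[] sizes = { 12902, 12301, 12348 };   // illustrative sizes summing to 37551 bytes, as in the log
        int[] pick = select(sizes, 3, 10, 1.2);
        System.out.println(pick == null ? "no compaction"
            : "compact " + pick[1] + " files starting at candidate #" + pick[0]);
    }
}

[With these assumed inputs the sketch selects all 3 files starting at candidate #0, matching the shape of the log record above.]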
2024-11-22T15:22:13,013 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:13,014 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., storeName=ed6f777bba2efed5f759348895e3133f/B, priority=13, startTime=1732288932899; duration=0sec 2024-11-22T15:22:13,014 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:22:13,014 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed6f777bba2efed5f759348895e3133f:B 2024-11-22T15:22:13,014 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:13,016 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:13,017 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): ed6f777bba2efed5f759348895e3133f/C is initiating minor compaction (all files) 2024-11-22T15:22:13,017 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed6f777bba2efed5f759348895e3133f/C in TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:13,017 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/7dcfacb44ab04a958620c88fd35406d4, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/561a87ed7c1943d796e5a5b04d163cad, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/6d0358d4f9be4a679654a2fd4f1eae53] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp, totalSize=36.7 K 2024-11-22T15:22:13,018 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7dcfacb44ab04a958620c88fd35406d4, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732288930515 2024-11-22T15:22:13,018 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 561a87ed7c1943d796e5a5b04d163cad, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1732288930842 2024-11-22T15:22:13,019 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6d0358d4f9be4a679654a2fd4f1eae53, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=319, earliestPutTs=1732288931534 2024-11-22T15:22:13,030 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 
77927f992d0b,36033,1732288915809 2024-11-22T15:22:13,031 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-22T15:22:13,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:13,032 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2837): Flushing ed6f777bba2efed5f759348895e3133f 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-22T15:22:13,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=A 2024-11-22T15:22:13,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:13,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=B 2024-11-22T15:22:13,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:13,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=C 2024-11-22T15:22:13,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:13,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741906_1082 (size=13051) 2024-11-22T15:22:13,049 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/26a518ac9f8f417080875fa31d9e8ab4 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/26a518ac9f8f417080875fa31d9e8ab4 2024-11-22T15:22:13,055 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed6f777bba2efed5f759348895e3133f#C#compaction#68 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:13,056 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/89286c1fabbf4e7cb7f0f5511e90bd3b is 50, key is test_row_0/C:col10/1732288932172/Put/seqid=0 2024-11-22T15:22:13,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/8297579f02ea4eeb8255f13fe7145a62 is 50, key is test_row_0/A:col10/1732288932194/Put/seqid=0 2024-11-22T15:22:13,079 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed6f777bba2efed5f759348895e3133f/A of ed6f777bba2efed5f759348895e3133f into 26a518ac9f8f417080875fa31d9e8ab4(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:22:13,080 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:13,080 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., storeName=ed6f777bba2efed5f759348895e3133f/A, priority=13, startTime=1732288932899; duration=0sec 2024-11-22T15:22:13,080 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:13,080 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed6f777bba2efed5f759348895e3133f:A 2024-11-22T15:22:13,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741907_1083 (size=13051) 2024-11-22T15:22:13,141 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/89286c1fabbf4e7cb7f0f5511e90bd3b as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/89286c1fabbf4e7cb7f0f5511e90bd3b 2024-11-22T15:22:13,155 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed6f777bba2efed5f759348895e3133f/C of ed6f777bba2efed5f759348895e3133f into 89286c1fabbf4e7cb7f0f5511e90bd3b(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
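[Editor's note: the "Committing .../.tmp/... as ..." records above reflect a write-to-temporary-then-rename pattern. The sketch below shows only that basic pattern with the plain Hadoop FileSystem API; paths and contents are placeholders, and real HBase performs this through HRegionFileSystem with additional validation.]

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpCommitSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        Path tmp = new Path("/data/default/SomeTable/region/.tmp/B/newfile");   // placeholder path
        Path dst = new Path("/data/default/SomeTable/region/B/newfile");        // placeholder path

        // 1. Write the new store file under the region's .tmp directory first,
        //    so readers never see a partially written file in the store directory.
        try (FSDataOutputStream out = fs.create(tmp)) {
            out.writeBytes("...hfile contents would go here...");
        }

        // 2. Move ("commit") it into the column-family directory; only after the
        //    rename is the file visible to scans and counted in the store size.
        if (!fs.rename(tmp, dst)) {
            throw new java.io.IOException("failed to commit " + tmp + " as " + dst);
        }
    }
}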
2024-11-22T15:22:13,155 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:13,155 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., storeName=ed6f777bba2efed5f759348895e3133f/C, priority=13, startTime=1732288932899; duration=0sec 2024-11-22T15:22:13,156 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:13,156 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed6f777bba2efed5f759348895e3133f:C 2024-11-22T15:22:13,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741908_1084 (size=12301) 2024-11-22T15:22:13,158 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/8297579f02ea4eeb8255f13fe7145a62 2024-11-22T15:22:13,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/ac26091912774439814fe8bad58d0fc1 is 50, key is test_row_0/B:col10/1732288932194/Put/seqid=0 2024-11-22T15:22:13,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741909_1085 (size=12301) 2024-11-22T15:22:13,240 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/ac26091912774439814fe8bad58d0fc1 2024-11-22T15:22:13,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/eaca8dd26ca547cabdcc38c622f349f4 is 50, key is test_row_0/C:col10/1732288932194/Put/seqid=0 2024-11-22T15:22:13,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741910_1086 (size=12301) 2024-11-22T15:22:13,365 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
as already flushing 2024-11-22T15:22:13,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed6f777bba2efed5f759348895e3133f 2024-11-22T15:22:13,442 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:13,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288993436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:13,448 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:13,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288993442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:13,448 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:13,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288993441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:13,555 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:13,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288993552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:13,564 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:13,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288993564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:13,570 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:13,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288993567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:13,716 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/eaca8dd26ca547cabdcc38c622f349f4 2024-11-22T15:22:13,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/8297579f02ea4eeb8255f13fe7145a62 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/8297579f02ea4eeb8255f13fe7145a62 2024-11-22T15:22:13,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-22T15:22:13,735 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/8297579f02ea4eeb8255f13fe7145a62, entries=150, sequenceid=329, filesize=12.0 K 2024-11-22T15:22:13,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/ac26091912774439814fe8bad58d0fc1 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/ac26091912774439814fe8bad58d0fc1 2024-11-22T15:22:13,747 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/ac26091912774439814fe8bad58d0fc1, entries=150, sequenceid=329, filesize=12.0 K 2024-11-22T15:22:13,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/eaca8dd26ca547cabdcc38c622f349f4 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/eaca8dd26ca547cabdcc38c622f349f4 2024-11-22T15:22:13,770 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:13,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288993768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:13,771 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:13,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288993771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:13,775 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:13,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288993774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:13,786 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/eaca8dd26ca547cabdcc38c622f349f4, entries=150, sequenceid=329, filesize=12.0 K 2024-11-22T15:22:13,787 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for ed6f777bba2efed5f759348895e3133f in 755ms, sequenceid=329, compaction requested=false 2024-11-22T15:22:13,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2538): Flush status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:13,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
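[Editor's note: the repeated RegionTooBusyException warnings above mean the region rejected writes because its memstore exceeded the blocking limit while flushes and compactions were in progress. The sketch below shows one way an application could back off and retry such a put; table, row, and retry parameters are placeholders, and the standard HBase client already retries this exception internally, so explicit handling like this is optional.]

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);
                    break;                          // write accepted
                } catch (RegionTooBusyException e) {
                    if (attempt >= 5) throw e;      // give up after a few attempts
                    Thread.sleep(backoffMs);        // let flushes/compactions catch up
                    backoffMs *= 2;                 // exponential backoff
                }
            }
        }
    }
}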
2024-11-22T15:22:13,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=25 2024-11-22T15:22:13,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=25 2024-11-22T15:22:13,790 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-11-22T15:22:13,790 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1750 sec 2024-11-22T15:22:13,793 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees in 2.1810 sec 2024-11-22T15:22:14,081 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed6f777bba2efed5f759348895e3133f 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-22T15:22:14,082 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=A 2024-11-22T15:22:14,082 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:14,082 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=B 2024-11-22T15:22:14,082 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:14,082 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=C 2024-11-22T15:22:14,082 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:14,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed6f777bba2efed5f759348895e3133f 2024-11-22T15:22:14,091 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/d1da24f0322c4ed49c8ffbd7344c3874 is 50, key is test_row_0/A:col10/1732288933431/Put/seqid=0 2024-11-22T15:22:14,107 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:14,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288994101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:14,108 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:14,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288994103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:14,109 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:14,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288994106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:14,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741911_1087 (size=14741) 2024-11-22T15:22:14,154 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=359 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/d1da24f0322c4ed49c8ffbd7344c3874 2024-11-22T15:22:14,176 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/ff7c2010c34d430d85ba369ada86341f is 50, key is test_row_0/B:col10/1732288933431/Put/seqid=0 2024-11-22T15:22:14,217 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:14,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288994210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:14,218 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:14,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288994210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:14,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741912_1088 (size=12301) 2024-11-22T15:22:14,223 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=359 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/ff7c2010c34d430d85ba369ada86341f 2024-11-22T15:22:14,235 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:14,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288994223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:14,245 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/3a702fdb647a4cd79db2cea53f541bfd is 50, key is test_row_0/C:col10/1732288933431/Put/seqid=0 2024-11-22T15:22:14,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741913_1089 (size=12301) 2024-11-22T15:22:14,302 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=359 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/3a702fdb647a4cd79db2cea53f541bfd 2024-11-22T15:22:14,315 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/d1da24f0322c4ed49c8ffbd7344c3874 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/d1da24f0322c4ed49c8ffbd7344c3874 2024-11-22T15:22:14,322 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/d1da24f0322c4ed49c8ffbd7344c3874, entries=200, sequenceid=359, filesize=14.4 K 2024-11-22T15:22:14,326 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/ff7c2010c34d430d85ba369ada86341f as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/ff7c2010c34d430d85ba369ada86341f 2024-11-22T15:22:14,350 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/ff7c2010c34d430d85ba369ada86341f, entries=150, sequenceid=359, filesize=12.0 K 2024-11-22T15:22:14,353 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/3a702fdb647a4cd79db2cea53f541bfd as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/3a702fdb647a4cd79db2cea53f541bfd 2024-11-22T15:22:14,362 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/3a702fdb647a4cd79db2cea53f541bfd, entries=150, sequenceid=359, filesize=12.0 K 2024-11-22T15:22:14,374 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for ed6f777bba2efed5f759348895e3133f in 282ms, sequenceid=359, compaction requested=true 2024-11-22T15:22:14,375 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:14,375 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:14,377 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40093 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:14,377 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed6f777bba2efed5f759348895e3133f:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:22:14,377 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:14,377 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:14,378 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): ed6f777bba2efed5f759348895e3133f/A is initiating minor compaction (all files) 2024-11-22T15:22:14,378 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed6f777bba2efed5f759348895e3133f/A in TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
2024-11-22T15:22:14,379 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/26a518ac9f8f417080875fa31d9e8ab4, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/8297579f02ea4eeb8255f13fe7145a62, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/d1da24f0322c4ed49c8ffbd7344c3874] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp, totalSize=39.2 K 2024-11-22T15:22:14,379 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed6f777bba2efed5f759348895e3133f:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:22:14,379 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:14,379 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed6f777bba2efed5f759348895e3133f:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:22:14,379 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:22:14,383 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:14,383 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): ed6f777bba2efed5f759348895e3133f/B is initiating minor compaction (all files) 2024-11-22T15:22:14,384 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed6f777bba2efed5f759348895e3133f/B in TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
2024-11-22T15:22:14,384 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/c75dd94ec3e84e7bbb10ad008eadcc24, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/ac26091912774439814fe8bad58d0fc1, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/ff7c2010c34d430d85ba369ada86341f] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp, totalSize=36.8 K 2024-11-22T15:22:14,385 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting c75dd94ec3e84e7bbb10ad008eadcc24, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=319, earliestPutTs=1732288931534 2024-11-22T15:22:14,385 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 26a518ac9f8f417080875fa31d9e8ab4, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=319, earliestPutTs=1732288931534 2024-11-22T15:22:14,386 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting ac26091912774439814fe8bad58d0fc1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1732288932192 2024-11-22T15:22:14,386 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8297579f02ea4eeb8255f13fe7145a62, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1732288932192 2024-11-22T15:22:14,387 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting d1da24f0322c4ed49c8ffbd7344c3874, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1732288933431 2024-11-22T15:22:14,387 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting ff7c2010c34d430d85ba369ada86341f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1732288933431 2024-11-22T15:22:14,422 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed6f777bba2efed5f759348895e3133f#A#compaction#75 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:14,423 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/57e764fff3f34308bafe721016b79b7a is 50, key is test_row_0/A:col10/1732288933431/Put/seqid=0 2024-11-22T15:22:14,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed6f777bba2efed5f759348895e3133f 2024-11-22T15:22:14,449 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed6f777bba2efed5f759348895e3133f 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-22T15:22:14,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=A 2024-11-22T15:22:14,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:14,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=B 2024-11-22T15:22:14,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:14,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=C 2024-11-22T15:22:14,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:14,451 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed6f777bba2efed5f759348895e3133f#B#compaction#76 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:14,452 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/156f9714ca2b4ad084cd7dd05ba26242 is 50, key is test_row_0/B:col10/1732288933431/Put/seqid=0 2024-11-22T15:22:14,463 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/6274db811c1a4118a78d06f141b9d6ab is 50, key is test_row_0/A:col10/1732288934436/Put/seqid=0 2024-11-22T15:22:14,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741914_1090 (size=13153) 2024-11-22T15:22:14,513 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/57e764fff3f34308bafe721016b79b7a as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/57e764fff3f34308bafe721016b79b7a 2024-11-22T15:22:14,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741915_1091 (size=13153) 2024-11-22T15:22:14,536 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed6f777bba2efed5f759348895e3133f/A of ed6f777bba2efed5f759348895e3133f into 57e764fff3f34308bafe721016b79b7a(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:22:14,536 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:14,536 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., storeName=ed6f777bba2efed5f759348895e3133f/A, priority=13, startTime=1732288934375; duration=0sec 2024-11-22T15:22:14,538 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:22:14,538 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed6f777bba2efed5f759348895e3133f:A 2024-11-22T15:22:14,538 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:14,540 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:14,540 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): ed6f777bba2efed5f759348895e3133f/C is initiating minor compaction (all files) 2024-11-22T15:22:14,540 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed6f777bba2efed5f759348895e3133f/C in TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:14,541 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/89286c1fabbf4e7cb7f0f5511e90bd3b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/eaca8dd26ca547cabdcc38c622f349f4, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/3a702fdb647a4cd79db2cea53f541bfd] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp, totalSize=36.8 K 2024-11-22T15:22:14,545 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 89286c1fabbf4e7cb7f0f5511e90bd3b, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=319, earliestPutTs=1732288931534 2024-11-22T15:22:14,546 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting eaca8dd26ca547cabdcc38c622f349f4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1732288932192 2024-11-22T15:22:14,548 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3a702fdb647a4cd79db2cea53f541bfd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1732288933431 2024-11-22T15:22:14,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:42059 is added to blk_1073741916_1092 (size=14741) 2024-11-22T15:22:14,551 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=371 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/6274db811c1a4118a78d06f141b9d6ab 2024-11-22T15:22:14,577 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/6f937da304cf49b38ead0eaafb9650f1 is 50, key is test_row_0/B:col10/1732288934436/Put/seqid=0 2024-11-22T15:22:14,582 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed6f777bba2efed5f759348895e3133f#C#compaction#79 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:14,583 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/b45c5789ce4446b0a2caa5a862490b8c is 50, key is test_row_0/C:col10/1732288933431/Put/seqid=0 2024-11-22T15:22:14,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741918_1094 (size=13153) 2024-11-22T15:22:14,633 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/b45c5789ce4446b0a2caa5a862490b8c as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/b45c5789ce4446b0a2caa5a862490b8c 2024-11-22T15:22:14,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741917_1093 (size=12301) 2024-11-22T15:22:14,649 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=371 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/6f937da304cf49b38ead0eaafb9650f1 2024-11-22T15:22:14,654 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed6f777bba2efed5f759348895e3133f/C of ed6f777bba2efed5f759348895e3133f into b45c5789ce4446b0a2caa5a862490b8c(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:22:14,654 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:14,655 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., storeName=ed6f777bba2efed5f759348895e3133f/C, priority=13, startTime=1732288934379; duration=0sec 2024-11-22T15:22:14,655 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:14,655 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed6f777bba2efed5f759348895e3133f:C 2024-11-22T15:22:14,665 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/b9b1df51a5de43e78d3249ca187853de is 50, key is test_row_0/C:col10/1732288934436/Put/seqid=0 2024-11-22T15:22:14,699 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:14,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288994682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:14,700 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:14,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288994687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:14,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741919_1095 (size=12301) 2024-11-22T15:22:14,711 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:14,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288994694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:14,811 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:14,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288994805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:14,822 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:14,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288994815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:14,822 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:14,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288994819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:14,940 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/156f9714ca2b4ad084cd7dd05ba26242 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/156f9714ca2b4ad084cd7dd05ba26242 2024-11-22T15:22:14,950 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed6f777bba2efed5f759348895e3133f/B of ed6f777bba2efed5f759348895e3133f into 156f9714ca2b4ad084cd7dd05ba26242(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:22:14,950 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:14,950 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., storeName=ed6f777bba2efed5f759348895e3133f/B, priority=13, startTime=1732288934377; duration=0sec 2024-11-22T15:22:14,950 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:14,950 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed6f777bba2efed5f759348895e3133f:B 2024-11-22T15:22:15,024 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:15,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288995017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:15,027 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:15,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288995025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:15,035 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:15,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288995029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:15,102 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=371 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/b9b1df51a5de43e78d3249ca187853de 2024-11-22T15:22:15,116 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/6274db811c1a4118a78d06f141b9d6ab as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/6274db811c1a4118a78d06f141b9d6ab 2024-11-22T15:22:15,127 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/6274db811c1a4118a78d06f141b9d6ab, entries=200, sequenceid=371, filesize=14.4 K 2024-11-22T15:22:15,131 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/6f937da304cf49b38ead0eaafb9650f1 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/6f937da304cf49b38ead0eaafb9650f1 2024-11-22T15:22:15,148 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/6f937da304cf49b38ead0eaafb9650f1, entries=150, sequenceid=371, filesize=12.0 K 2024-11-22T15:22:15,150 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/b9b1df51a5de43e78d3249ca187853de as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/b9b1df51a5de43e78d3249ca187853de 2024-11-22T15:22:15,166 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/b9b1df51a5de43e78d3249ca187853de, entries=150, sequenceid=371, filesize=12.0 K 2024-11-22T15:22:15,173 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for ed6f777bba2efed5f759348895e3133f in 724ms, sequenceid=371, compaction requested=false 2024-11-22T15:22:15,174 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:15,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed6f777bba2efed5f759348895e3133f 2024-11-22T15:22:15,267 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed6f777bba2efed5f759348895e3133f 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-22T15:22:15,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=A 2024-11-22T15:22:15,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:15,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=B 2024-11-22T15:22:15,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:15,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=C 2024-11-22T15:22:15,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:15,296 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/f4cb6e13c9284a6ba7db2fc7ec2dd340 is 50, key is test_row_0/A:col10/1732288934676/Put/seqid=0 2024-11-22T15:22:15,300 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:15,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732288995297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:15,303 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:15,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732288995301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:15,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741920_1096 (size=14741) 2024-11-22T15:22:15,332 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:15,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288995330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:15,337 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:15,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288995333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:15,351 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:15,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288995345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:15,410 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:15,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732288995405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:15,410 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:15,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732288995406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:15,616 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:15,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732288995614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:15,621 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:15,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732288995616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:15,734 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=399 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/f4cb6e13c9284a6ba7db2fc7ec2dd340 2024-11-22T15:22:15,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-22T15:22:15,736 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed 2024-11-22T15:22:15,742 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:22:15,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees 2024-11-22T15:22:15,745 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:22:15,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-22T15:22:15,747 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:22:15,747 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:22:15,755 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/666a329fca07492c9d5871685b2b2589 is 50, key is test_row_0/B:col10/1732288934676/Put/seqid=0 2024-11-22T15:22:15,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741921_1097 (size=12301) 
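The repeated RegionTooBusyException entries above come from HRegion.checkResources rejecting writes while the region's memstore is over its 512.0 K blocking limit; callers are expected to back off and retry once the in-flight flush drains the memstore. The sketch below shows that retry pattern on the client side, with an illustrative row, family, and backoff schedule (the test's own writer threads are not shown in this log, and the stock HBase client normally performs such retries internally, so the exception may surface wrapped in a retries-exhausted error rather than directly as assumed here).

    // Hedged sketch: retry a Put when the server reports RegionTooBusyException
    // ("Over memstore limit=..."). Table, row, family, and backoff values are illustrative.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetryExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;
          for (int attempt = 1; attempt <= 10; attempt++) {
            try {
              table.put(put);                     // succeeds once the memstore drains below the limit
              break;
            } catch (RegionTooBusyException e) {  // server-side checkResources rejected the write
              Thread.sleep(backoffMs);
              backoffMs = Math.min(backoffMs * 2, 5_000);
            }
          }
        }
      }
    }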
2024-11-22T15:22:15,810 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=399 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/666a329fca07492c9d5871685b2b2589 2024-11-22T15:22:15,833 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/d01783af10144f768279d0909527b953 is 50, key is test_row_0/C:col10/1732288934676/Put/seqid=0 2024-11-22T15:22:15,840 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:15,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288995838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:15,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-22T15:22:15,846 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:15,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288995843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:15,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741922_1098 (size=12301) 2024-11-22T15:22:15,851 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=399 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/d01783af10144f768279d0909527b953 2024-11-22T15:22:15,859 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:15,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288995856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:15,875 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/f4cb6e13c9284a6ba7db2fc7ec2dd340 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/f4cb6e13c9284a6ba7db2fc7ec2dd340 2024-11-22T15:22:15,884 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/f4cb6e13c9284a6ba7db2fc7ec2dd340, entries=200, sequenceid=399, filesize=14.4 K 2024-11-22T15:22:15,885 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/666a329fca07492c9d5871685b2b2589 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/666a329fca07492c9d5871685b2b2589 2024-11-22T15:22:15,892 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/666a329fca07492c9d5871685b2b2589, entries=150, sequenceid=399, filesize=12.0 K 2024-11-22T15:22:15,893 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/d01783af10144f768279d0909527b953 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/d01783af10144f768279d0909527b953 2024-11-22T15:22:15,900 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:15,901 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-22T15:22:15,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] 
regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:15,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:15,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:15,901 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:15,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:22:15,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
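The FLUSH operations threaded through the entries above (procId 24 reported complete by HBaseAdmin$TableFuture, then pid=26 stored as a FlushTableProcedure with FlushRegionProcedure pid=27 as its subprocedure) were requested by the test client against default:TestAcidGuarantees. A flush like this is normally issued through the Admin API; the sketch below shows the usual call, with connection setup assumed and the exact helper used by the test harness not visible in this log.

    // Hedged sketch: trigger a table flush the way the client-side entries above suggest.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // The HBaseAdmin$TableFuture entry above suggests the call waits on the procedure id.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }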
2024-11-22T15:22:15,905 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/d01783af10144f768279d0909527b953, entries=150, sequenceid=399, filesize=12.0 K 2024-11-22T15:22:15,906 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for ed6f777bba2efed5f759348895e3133f in 639ms, sequenceid=399, compaction requested=true 2024-11-22T15:22:15,907 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:15,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed6f777bba2efed5f759348895e3133f:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:22:15,907 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:15,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:15,907 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:15,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed6f777bba2efed5f759348895e3133f:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:22:15,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:15,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed6f777bba2efed5f759348895e3133f:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:22:15,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:22:15,908 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:15,908 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): ed6f777bba2efed5f759348895e3133f/B is initiating minor compaction (all files) 2024-11-22T15:22:15,909 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed6f777bba2efed5f759348895e3133f/B in TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
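The region-level summary in the entry above lines up with the three per-family flushes that precede it: stores A, B, and C each reported 49.20 KB of memstore data at sequenceid=399, so

$3 \times 49.20\ \mathrm{KB} = 147.60\ \mathrm{KB}$

which the flush summary records as ~147.60 KB/151140 bytes written in 639 ms before compaction is requested for all three stores.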
2024-11-22T15:22:15,909 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/156f9714ca2b4ad084cd7dd05ba26242, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/6f937da304cf49b38ead0eaafb9650f1, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/666a329fca07492c9d5871685b2b2589] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp, totalSize=36.9 K 2024-11-22T15:22:15,909 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42635 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:15,909 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): ed6f777bba2efed5f759348895e3133f/A is initiating minor compaction (all files) 2024-11-22T15:22:15,909 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed6f777bba2efed5f759348895e3133f/A in TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:15,909 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/57e764fff3f34308bafe721016b79b7a, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/6274db811c1a4118a78d06f141b9d6ab, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/f4cb6e13c9284a6ba7db2fc7ec2dd340] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp, totalSize=41.6 K 2024-11-22T15:22:15,910 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 57e764fff3f34308bafe721016b79b7a, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1732288933431 2024-11-22T15:22:15,910 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 156f9714ca2b4ad084cd7dd05ba26242, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1732288933431 2024-11-22T15:22:15,910 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6274db811c1a4118a78d06f141b9d6ab, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1732288934431 2024-11-22T15:22:15,910 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 6f937da304cf49b38ead0eaafb9650f1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1732288934436 2024-11-22T15:22:15,911 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting f4cb6e13c9284a6ba7db2fc7ec2dd340, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1732288934588 2024-11-22T15:22:15,911 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 666a329fca07492c9d5871685b2b2589, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1732288934676 2024-11-22T15:22:15,929 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed6f777bba2efed5f759348895e3133f 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-22T15:22:15,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=A 2024-11-22T15:22:15,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:15,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=B 2024-11-22T15:22:15,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:15,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=C 2024-11-22T15:22:15,929 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed6f777bba2efed5f759348895e3133f#A#compaction#84 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:15,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:15,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed6f777bba2efed5f759348895e3133f 2024-11-22T15:22:15,930 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/ee2d60df013b48a7b157bc18d9eac9ed is 50, key is test_row_0/A:col10/1732288934676/Put/seqid=0 2024-11-22T15:22:15,942 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed6f777bba2efed5f759348895e3133f#B#compaction#85 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:15,943 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/a3d27f808b644e209a19c25ef84c1000 is 50, key is test_row_0/B:col10/1732288934676/Put/seqid=0 2024-11-22T15:22:15,953 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/10adab8e7c90487ab0e169d132fe9623 is 50, key is test_row_1/A:col10/1732288935926/Put/seqid=0 2024-11-22T15:22:16,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741923_1099 (size=13255) 2024-11-22T15:22:16,017 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/ee2d60df013b48a7b157bc18d9eac9ed as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/ee2d60df013b48a7b157bc18d9eac9ed 2024-11-22T15:22:16,024 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed6f777bba2efed5f759348895e3133f/A of ed6f777bba2efed5f759348895e3133f into ee2d60df013b48a7b157bc18d9eac9ed(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
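By this point each of the A, B, and C stores had accumulated three HFiles from the preceding flushes, which is why ExploringCompactionPolicy keeps "Selecting compaction from 3 store files" and runs the minor compactions logged here. The file-count trigger is governed by the store's minimum compaction file count, which defaults to 3 in stock HBase; the snippet below is an illustrative configuration sketch (key name assumed from hbase-default.xml, not taken from this log).

    // Hedged sketch: the store file-count threshold that lets a minor compaction be selected.
    // 3 matches the "Selecting compaction from 3 store files" entries above; this is not the
    // test's actual configuration, which is not visible in this log.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionMinFilesConfig {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);  // older alias: hbase.hstore.compactionThreshold
        System.out.println(conf.getInt("hbase.hstore.compaction.min", -1));
      }
    }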
2024-11-22T15:22:16,025 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:16,025 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., storeName=ed6f777bba2efed5f759348895e3133f/A, priority=13, startTime=1732288935907; duration=0sec 2024-11-22T15:22:16,025 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:22:16,025 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed6f777bba2efed5f759348895e3133f:A 2024-11-22T15:22:16,025 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:16,026 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:16,026 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): ed6f777bba2efed5f759348895e3133f/C is initiating minor compaction (all files) 2024-11-22T15:22:16,027 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed6f777bba2efed5f759348895e3133f/C in TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:16,027 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/b45c5789ce4446b0a2caa5a862490b8c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/b9b1df51a5de43e78d3249ca187853de, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/d01783af10144f768279d0909527b953] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp, totalSize=36.9 K 2024-11-22T15:22:16,027 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting b45c5789ce4446b0a2caa5a862490b8c, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1732288933431 2024-11-22T15:22:16,028 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting b9b1df51a5de43e78d3249ca187853de, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1732288934436 2024-11-22T15:22:16,029 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting d01783af10144f768279d0909527b953, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1732288934676 2024-11-22T15:22:16,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:42059 is added to blk_1073741924_1100 (size=13255) 2024-11-22T15:22:16,037 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:16,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732288996030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:16,041 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/a3d27f808b644e209a19c25ef84c1000 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/a3d27f808b644e209a19c25ef84c1000 2024-11-22T15:22:16,044 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:16,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732288996037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:16,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-22T15:22:16,049 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed6f777bba2efed5f759348895e3133f/B of ed6f777bba2efed5f759348895e3133f into a3d27f808b644e209a19c25ef84c1000(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:22:16,049 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:16,049 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., storeName=ed6f777bba2efed5f759348895e3133f/B, priority=13, startTime=1732288935907; duration=0sec 2024-11-22T15:22:16,049 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:16,050 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed6f777bba2efed5f759348895e3133f:B 2024-11-22T15:22:16,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741925_1101 (size=9857) 2024-11-22T15:22:16,055 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:16,056 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/10adab8e7c90487ab0e169d132fe9623 2024-11-22T15:22:16,058 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-22T15:22:16,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
2024-11-22T15:22:16,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:16,058 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed6f777bba2efed5f759348895e3133f#C#compaction#87 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:16,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:16,059 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:16,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:22:16,059 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/344b1e183d14413980af448583ebe3bc is 50, key is test_row_0/C:col10/1732288934676/Put/seqid=0 2024-11-22T15:22:16,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:22:16,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741926_1102 (size=13255) 2024-11-22T15:22:16,126 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/33cb133859654c199206a345938c04e2 is 50, key is test_row_1/B:col10/1732288935926/Put/seqid=0 2024-11-22T15:22:16,140 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:16,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732288996139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:16,151 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:16,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732288996150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:16,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741927_1103 (size=9857) 2024-11-22T15:22:16,182 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/33cb133859654c199206a345938c04e2 2024-11-22T15:22:16,210 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/1df02971e201472dade34f1e8fbe710b is 50, key is test_row_1/C:col10/1732288935926/Put/seqid=0 2024-11-22T15:22:16,211 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:16,212 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-22T15:22:16,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:16,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:16,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:16,212 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:16,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:16,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:16,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741928_1104 (size=9857) 2024-11-22T15:22:16,256 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/1df02971e201472dade34f1e8fbe710b 2024-11-22T15:22:16,264 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/10adab8e7c90487ab0e169d132fe9623 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/10adab8e7c90487ab0e169d132fe9623 2024-11-22T15:22:16,271 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/10adab8e7c90487ab0e169d132fe9623, entries=100, sequenceid=411, filesize=9.6 K 2024-11-22T15:22:16,273 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/33cb133859654c199206a345938c04e2 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/33cb133859654c199206a345938c04e2 2024-11-22T15:22:16,288 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/33cb133859654c199206a345938c04e2, entries=100, sequenceid=411, filesize=9.6 K 2024-11-22T15:22:16,291 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/1df02971e201472dade34f1e8fbe710b as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/1df02971e201472dade34f1e8fbe710b 2024-11-22T15:22:16,299 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/1df02971e201472dade34f1e8fbe710b, entries=100, sequenceid=411, filesize=9.6 K 2024-11-22T15:22:16,301 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for ed6f777bba2efed5f759348895e3133f in 372ms, sequenceid=411, compaction requested=false 2024-11-22T15:22:16,301 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:16,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-22T15:22:16,356 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed6f777bba2efed5f759348895e3133f 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-22T15:22:16,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed6f777bba2efed5f759348895e3133f 2024-11-22T15:22:16,361 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=A 2024-11-22T15:22:16,361 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:16,361 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=B 2024-11-22T15:22:16,361 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:16,361 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=C 2024-11-22T15:22:16,361 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:16,371 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:16,375 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-22T15:22:16,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
2024-11-22T15:22:16,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:16,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:16,377 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:16,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:22:16,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:16,391 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/811d4ad9362f43eb8ff7dcd1b6a8a07d is 50, key is test_row_0/A:col10/1732288936010/Put/seqid=0 2024-11-22T15:22:16,444 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:16,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732288996439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:16,445 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:16,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732288996444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:16,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741929_1105 (size=14741) 2024-11-22T15:22:16,451 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=439 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/811d4ad9362f43eb8ff7dcd1b6a8a07d 2024-11-22T15:22:16,480 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/3a825cf99a124868b4dacd70433f9020 is 50, key is test_row_0/B:col10/1732288936010/Put/seqid=0 2024-11-22T15:22:16,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741930_1106 (size=12301) 2024-11-22T15:22:16,530 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/344b1e183d14413980af448583ebe3bc as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/344b1e183d14413980af448583ebe3bc 2024-11-22T15:22:16,531 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:16,531 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-22T15:22:16,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:16,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
as already flushing 2024-11-22T15:22:16,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:16,532 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:16,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:16,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:16,549 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:16,549 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:16,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732288996546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:16,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732288996546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:16,552 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed6f777bba2efed5f759348895e3133f/C of ed6f777bba2efed5f759348895e3133f into 344b1e183d14413980af448583ebe3bc(size=12.9 K), total size for store is 22.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
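The RegionTooBusyException warnings above mean the region is rejecting new writes because its memstore has grown past the blocking limit (512.0 K here; in a stock deployment that limit is roughly hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, so the small value suggests the test lowers the flush size). The HBase client's own retry logic normally retries these, but a caller doing raw puts can also back off explicitly. The following is a minimal, hypothetical Java sketch, not part of the test harness; the row, family and qualifier are reused from the log purely for illustration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPutExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;
          for (int attempt = 1; attempt <= 5; attempt++) {
            try {
              // May be rejected while the memstore is over its blocking limit.
              table.put(put);
              break;
            } catch (RegionTooBusyException e) {
              // The region blocks new writes until the in-flight flush frees memstore space,
              // so wait with a simple exponential backoff before retrying.
              Thread.sleep(backoffMs);
              backoffMs *= 2;
            }
          }
        }
      }
    }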
2024-11-22T15:22:16,553 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:16,553 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., storeName=ed6f777bba2efed5f759348895e3133f/C, priority=13, startTime=1732288935907; duration=0sec 2024-11-22T15:22:16,553 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:16,553 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed6f777bba2efed5f759348895e3133f:C 2024-11-22T15:22:16,691 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:16,692 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-22T15:22:16,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:16,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:16,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:16,692 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:22:16,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:16,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:16,754 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:16,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732288996752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:16,755 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:16,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732288996752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:16,845 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:16,847 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-22T15:22:16,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:16,846 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:16,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
as already flushing 2024-11-22T15:22:16,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288996844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:16,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:16,847 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:16,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:22:16,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:16,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-22T15:22:16,865 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:16,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288996864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:16,879 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:16,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288996870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:16,922 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=439 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/3a825cf99a124868b4dacd70433f9020 2024-11-22T15:22:16,953 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/5f9cfc5cefab442dab88d0cd7565764f is 50, key is test_row_0/C:col10/1732288936010/Put/seqid=0 2024-11-22T15:22:16,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741931_1107 (size=12301) 2024-11-22T15:22:16,984 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=439 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/5f9cfc5cefab442dab88d0cd7565764f 2024-11-22T15:22:16,992 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/811d4ad9362f43eb8ff7dcd1b6a8a07d as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/811d4ad9362f43eb8ff7dcd1b6a8a07d 2024-11-22T15:22:17,000 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:17,002 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-22T15:22:17,002 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/811d4ad9362f43eb8ff7dcd1b6a8a07d, entries=200, sequenceid=439, filesize=14.4 K 2024-11-22T15:22:17,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): 
Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:17,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:17,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:17,002 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:17,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:22:17,004 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/3a825cf99a124868b4dacd70433f9020 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/3a825cf99a124868b4dacd70433f9020 2024-11-22T15:22:17,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
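The repeated pid=27 cycle above is the master re-dispatching its FlushRegionCallable: the region server declines ("NOT flushing ... as already flushing") because MemStoreFlusher.0 is already flushing the region, the callable surfaces the IOException "Unable to complete flush", the master logs "Remote procedure failed, pid=27", and the dispatch is retried until the in-progress flush finishes. A flush procedure like this is typically driven by an admin-level flush request; below is a minimal sketch of issuing one, assuming only the standard Admin API (this is not the code the test itself runs).

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Ask the master to flush every region of the table. Internally this becomes a
          // procedure (a pid such as pid=27 above) that sends a flush callable to each
          // region server; a server that is already flushing rejects it and the master retries.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }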
2024-11-22T15:22:17,019 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/3a825cf99a124868b4dacd70433f9020, entries=150, sequenceid=439, filesize=12.0 K 2024-11-22T15:22:17,022 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/5f9cfc5cefab442dab88d0cd7565764f as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/5f9cfc5cefab442dab88d0cd7565764f 2024-11-22T15:22:17,030 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/5f9cfc5cefab442dab88d0cd7565764f, entries=150, sequenceid=439, filesize=12.0 K 2024-11-22T15:22:17,031 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for ed6f777bba2efed5f759348895e3133f in 675ms, sequenceid=439, compaction requested=true 2024-11-22T15:22:17,031 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:17,032 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:17,032 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed6f777bba2efed5f759348895e3133f:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:22:17,032 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:17,032 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed6f777bba2efed5f759348895e3133f:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:22:17,032 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:17,032 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:17,032 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed6f777bba2efed5f759348895e3133f:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:22:17,032 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:22:17,033 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37853 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:17,034 DEBUG 
[RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): ed6f777bba2efed5f759348895e3133f/A is initiating minor compaction (all files) 2024-11-22T15:22:17,034 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed6f777bba2efed5f759348895e3133f/A in TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:17,034 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/ee2d60df013b48a7b157bc18d9eac9ed, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/10adab8e7c90487ab0e169d132fe9623, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/811d4ad9362f43eb8ff7dcd1b6a8a07d] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp, totalSize=37.0 K 2024-11-22T15:22:17,034 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35413 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:17,034 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): ed6f777bba2efed5f759348895e3133f/B is initiating minor compaction (all files) 2024-11-22T15:22:17,034 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed6f777bba2efed5f759348895e3133f/B in TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
2024-11-22T15:22:17,035 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/a3d27f808b644e209a19c25ef84c1000, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/33cb133859654c199206a345938c04e2, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/3a825cf99a124868b4dacd70433f9020] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp, totalSize=34.6 K 2024-11-22T15:22:17,037 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting a3d27f808b644e209a19c25ef84c1000, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1732288934676 2024-11-22T15:22:17,037 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting ee2d60df013b48a7b157bc18d9eac9ed, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1732288934676 2024-11-22T15:22:17,037 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 33cb133859654c199206a345938c04e2, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1732288935295 2024-11-22T15:22:17,038 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 10adab8e7c90487ab0e169d132fe9623, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1732288935295 2024-11-22T15:22:17,038 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 3a825cf99a124868b4dacd70433f9020, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=439, earliestPutTs=1732288936010 2024-11-22T15:22:17,038 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 811d4ad9362f43eb8ff7dcd1b6a8a07d, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=439, earliestPutTs=1732288936010 2024-11-22T15:22:17,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed6f777bba2efed5f759348895e3133f 2024-11-22T15:22:17,061 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed6f777bba2efed5f759348895e3133f 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-22T15:22:17,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=A 2024-11-22T15:22:17,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:17,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=B 2024-11-22T15:22:17,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:17,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=C 
2024-11-22T15:22:17,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:17,088 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed6f777bba2efed5f759348895e3133f#B#compaction#93 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:17,089 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/eeb965a4e2694a459762d877fe40a9e3 is 50, key is test_row_0/B:col10/1732288936010/Put/seqid=0 2024-11-22T15:22:17,099 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed6f777bba2efed5f759348895e3133f#A#compaction#94 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:17,100 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/18a95b04998e4cd9a2defab662b25301 is 50, key is test_row_0/A:col10/1732288936010/Put/seqid=0 2024-11-22T15:22:17,110 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/dc67e058c89446d29d6a7bfb00bec074 is 50, key is test_row_0/A:col10/1732288936377/Put/seqid=0 2024-11-22T15:22:17,158 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:17,160 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-22T15:22:17,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:17,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:17,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:17,160 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:17,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:17,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:17,166 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:17,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732288997158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:17,178 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:17,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732288997165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:17,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741933_1109 (size=13357) 2024-11-22T15:22:17,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741932_1108 (size=13357) 2024-11-22T15:22:17,215 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/eeb965a4e2694a459762d877fe40a9e3 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/eeb965a4e2694a459762d877fe40a9e3 2024-11-22T15:22:17,225 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed6f777bba2efed5f759348895e3133f/B of ed6f777bba2efed5f759348895e3133f into eeb965a4e2694a459762d877fe40a9e3(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:22:17,226 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:17,226 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., storeName=ed6f777bba2efed5f759348895e3133f/B, priority=13, startTime=1732288937032; duration=0sec 2024-11-22T15:22:17,226 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:22:17,226 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed6f777bba2efed5f759348895e3133f:B 2024-11-22T15:22:17,226 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:17,229 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35413 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:17,229 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): ed6f777bba2efed5f759348895e3133f/C is initiating minor compaction (all files) 2024-11-22T15:22:17,229 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed6f777bba2efed5f759348895e3133f/C in TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:17,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741934_1110 (size=12301) 2024-11-22T15:22:17,231 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/344b1e183d14413980af448583ebe3bc, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/1df02971e201472dade34f1e8fbe710b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/5f9cfc5cefab442dab88d0cd7565764f] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp, totalSize=34.6 K 2024-11-22T15:22:17,231 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 344b1e183d14413980af448583ebe3bc, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1732288934676 2024-11-22T15:22:17,232 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 1df02971e201472dade34f1e8fbe710b, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1732288935295 2024-11-22T15:22:17,233 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 5f9cfc5cefab442dab88d0cd7565764f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=439, earliestPutTs=1732288936010 2024-11-22T15:22:17,272 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:17,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732288997269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:17,280 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed6f777bba2efed5f759348895e3133f#C#compaction#96 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:17,281 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/df5a33d6dccc4ebd929e38da34f21d49 is 50, key is test_row_0/C:col10/1732288936010/Put/seqid=0 2024-11-22T15:22:17,284 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:17,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732288997280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:17,314 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:17,315 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-22T15:22:17,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:17,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:17,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:17,315 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:22:17,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:17,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:17,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741935_1111 (size=13357) 2024-11-22T15:22:17,344 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/df5a33d6dccc4ebd929e38da34f21d49 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/df5a33d6dccc4ebd929e38da34f21d49 2024-11-22T15:22:17,356 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed6f777bba2efed5f759348895e3133f/C of ed6f777bba2efed5f759348895e3133f into df5a33d6dccc4ebd929e38da34f21d49(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:22:17,356 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:17,357 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., storeName=ed6f777bba2efed5f759348895e3133f/C, priority=13, startTime=1732288937032; duration=0sec 2024-11-22T15:22:17,357 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:17,357 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed6f777bba2efed5f759348895e3133f:C 2024-11-22T15:22:17,468 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:17,469 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-22T15:22:17,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:17,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:17,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
2024-11-22T15:22:17,469 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:17,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:17,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:17,478 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:17,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732288997475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:17,502 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:17,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732288997498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:17,593 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/18a95b04998e4cd9a2defab662b25301 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/18a95b04998e4cd9a2defab662b25301 2024-11-22T15:22:17,606 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed6f777bba2efed5f759348895e3133f/A of ed6f777bba2efed5f759348895e3133f into 18a95b04998e4cd9a2defab662b25301(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:22:17,606 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:17,606 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., storeName=ed6f777bba2efed5f759348895e3133f/A, priority=13, startTime=1732288937031; duration=0sec 2024-11-22T15:22:17,606 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:17,606 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed6f777bba2efed5f759348895e3133f:A 2024-11-22T15:22:17,621 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:17,627 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-22T15:22:17,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
2024-11-22T15:22:17,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:17,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:17,627 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:17,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:22:17,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:22:17,632 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=452 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/dc67e058c89446d29d6a7bfb00bec074 2024-11-22T15:22:17,676 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/cc7a273680594f9cbf2baf77a67d235a is 50, key is test_row_0/B:col10/1732288936377/Put/seqid=0 2024-11-22T15:22:17,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741936_1112 (size=12301) 2024-11-22T15:22:17,780 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:17,780 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-22T15:22:17,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:17,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:17,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:17,781 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:22:17,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:17,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:17,792 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:17,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732288997791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:17,821 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:17,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732288997808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:17,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-22T15:22:17,943 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:17,947 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-22T15:22:17,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:17,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:17,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:17,947 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:22:17,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:17,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:18,099 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:18,100 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-22T15:22:18,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:18,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:18,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:18,100 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:18,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:18,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
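[editorial note] The block above repeats a pattern that recurs throughout this section: the master dispatches a FlushRegionCallable for pid=27, the region server declines because the region is "already flushing", the callable fails with "Unable to complete flush", and the master logs the remote-procedure failure and redispatches. A minimal, hedged sketch of how such a race can be provoked from client code is below: an explicit admin flush issued while the MemStoreFlusher is already flushing the same region. The table name comes from the log; the connection setup and class name are assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushWhileBusy {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // If the target region is already being flushed (here by MemStoreFlusher.0),
      // the region server logs "NOT flushing ... as already flushing" and the
      // remote flush procedure fails and is retried by the master, as seen above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}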
2024-11-22T15:22:18,134 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=452 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/cc7a273680594f9cbf2baf77a67d235a 2024-11-22T15:22:18,162 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/66507102558c4d2a8bfe2a18da0fe789 is 50, key is test_row_0/C:col10/1732288936377/Put/seqid=0 2024-11-22T15:22:18,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741937_1113 (size=12301) 2024-11-22T15:22:18,233 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=452 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/66507102558c4d2a8bfe2a18da0fe789 2024-11-22T15:22:18,246 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/dc67e058c89446d29d6a7bfb00bec074 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/dc67e058c89446d29d6a7bfb00bec074 2024-11-22T15:22:18,255 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/dc67e058c89446d29d6a7bfb00bec074, entries=150, sequenceid=452, filesize=12.0 K 2024-11-22T15:22:18,256 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:18,257 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/cc7a273680594f9cbf2baf77a67d235a as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/cc7a273680594f9cbf2baf77a67d235a 2024-11-22T15:22:18,257 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-22T15:22:18,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:18,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
as already flushing 2024-11-22T15:22:18,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:18,257 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:18,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:18,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:18,271 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/cc7a273680594f9cbf2baf77a67d235a, entries=150, sequenceid=452, filesize=12.0 K 2024-11-22T15:22:18,272 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/66507102558c4d2a8bfe2a18da0fe789 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/66507102558c4d2a8bfe2a18da0fe789 2024-11-22T15:22:18,285 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/66507102558c4d2a8bfe2a18da0fe789, entries=150, sequenceid=452, filesize=12.0 K 2024-11-22T15:22:18,286 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for ed6f777bba2efed5f759348895e3133f in 1226ms, sequenceid=452, compaction requested=false 2024-11-22T15:22:18,286 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:18,300 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed6f777bba2efed5f759348895e3133f 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-22T15:22:18,300 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO 
DISK ed6f777bba2efed5f759348895e3133f, store=A 2024-11-22T15:22:18,300 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:18,300 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=B 2024-11-22T15:22:18,300 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:18,300 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=C 2024-11-22T15:22:18,300 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:18,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed6f777bba2efed5f759348895e3133f 2024-11-22T15:22:18,317 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/45b1c98712304ddf8ed1bcad2d11309e is 50, key is test_row_0/A:col10/1732288937136/Put/seqid=0 2024-11-22T15:22:18,352 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:18,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732288998341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:18,353 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:18,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732288998352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:18,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741938_1114 (size=14741) 2024-11-22T15:22:18,365 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=480 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/45b1c98712304ddf8ed1bcad2d11309e 2024-11-22T15:22:18,384 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/777c6d4e2f7b437bb85de796bc164ab9 is 50, key is test_row_0/B:col10/1732288937136/Put/seqid=0 2024-11-22T15:22:18,412 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:18,413 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-22T15:22:18,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:18,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
as already flushing 2024-11-22T15:22:18,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:18,413 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:18,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:18,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:18,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741939_1115 (size=12301) 2024-11-22T15:22:18,435 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=480 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/777c6d4e2f7b437bb85de796bc164ab9 2024-11-22T15:22:18,455 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:18,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732288998454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:18,460 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:18,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732288998456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:18,480 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/a421cd99cf03463c83d714c526ba5947 is 50, key is test_row_0/C:col10/1732288937136/Put/seqid=0 2024-11-22T15:22:18,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741940_1116 (size=12301) 2024-11-22T15:22:18,567 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:18,568 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-22T15:22:18,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:18,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:18,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:18,569 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:18,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:18,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:18,662 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:18,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732288998659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:18,665 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:18,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732288998663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:18,739 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:18,740 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-22T15:22:18,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:18,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:18,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:18,741 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
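[editorial note] Interleaved with the flush retries are RegionTooBusyException warnings from HRegion.checkResources(): puts are rejected while the region's memstore is above its blocking limit (512.0 K in this run; in HBase that limit is normally hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier). The client half of the exchange appears a little further down, where RpcRetryingCallerImpl reports tries=6 for the HTable.put issued by AcidGuaranteesTestTool's AtomicityWriter. A hedged sketch of the equivalent client call is below; the table, family, and row names are taken from the log, while the connection setup, value, and class name are assumptions.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_2"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // Table.put() goes through RpcRetryingCallerImpl; a server-side
      // RegionTooBusyException (memstore over the blocking limit) is retried with
      // backoff until the flush frees space or retries are exhausted, which is why
      // the callId values in the log keep climbing while the flush is in progress.
      table.put(put);
    }
  }
}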
2024-11-22T15:22:18,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:18,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:18,856 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:18,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732288998854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:18,857 DEBUG [Thread-151 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4170 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., hostname=77927f992d0b,36033,1732288915809, seqNum=2, see https://s.apache.org/timeout, 
exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at 
org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T15:22:18,876 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:18,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732288998871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:18,877 DEBUG [Thread-153 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4195 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., hostname=77927f992d0b,36033,1732288915809, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T15:22:18,894 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:18,895 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-22T15:22:18,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:18,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:18,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:18,895 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:18,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:18,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:18,905 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:18,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732288998902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:18,906 DEBUG [Thread-149 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4213 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., hostname=77927f992d0b,36033,1732288915809, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T15:22:18,931 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=480 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/a421cd99cf03463c83d714c526ba5947 2024-11-22T15:22:18,946 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/45b1c98712304ddf8ed1bcad2d11309e as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/45b1c98712304ddf8ed1bcad2d11309e 2024-11-22T15:22:18,953 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/45b1c98712304ddf8ed1bcad2d11309e, entries=200, sequenceid=480, filesize=14.4 K 2024-11-22T15:22:18,958 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-11-22T15:22:18,959 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/777c6d4e2f7b437bb85de796bc164ab9 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/777c6d4e2f7b437bb85de796bc164ab9 2024-11-22T15:22:18,969 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:18,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732288998965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:18,971 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:18,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732288998967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:18,971 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/777c6d4e2f7b437bb85de796bc164ab9, entries=150, sequenceid=480, filesize=12.0 K 2024-11-22T15:22:18,972 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/a421cd99cf03463c83d714c526ba5947 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/a421cd99cf03463c83d714c526ba5947 2024-11-22T15:22:18,979 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/a421cd99cf03463c83d714c526ba5947, entries=150, sequenceid=480, filesize=12.0 K 2024-11-22T15:22:18,980 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for ed6f777bba2efed5f759348895e3133f in 680ms, sequenceid=480, compaction requested=true 2024-11-22T15:22:18,980 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:18,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed6f777bba2efed5f759348895e3133f:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:22:18,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:18,980 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:18,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed6f777bba2efed5f759348895e3133f:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:22:18,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), 
splitQueue=0 2024-11-22T15:22:18,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed6f777bba2efed5f759348895e3133f:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:22:18,981 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:18,981 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-22T15:22:18,982 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40399 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:18,982 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): ed6f777bba2efed5f759348895e3133f/A is initiating minor compaction (all files) 2024-11-22T15:22:18,982 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed6f777bba2efed5f759348895e3133f/A in TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:18,982 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/18a95b04998e4cd9a2defab662b25301, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/dc67e058c89446d29d6a7bfb00bec074, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/45b1c98712304ddf8ed1bcad2d11309e] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp, totalSize=39.5 K 2024-11-22T15:22:18,982 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37959 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:18,982 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): ed6f777bba2efed5f759348895e3133f/B is initiating minor compaction (all files) 2024-11-22T15:22:18,983 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed6f777bba2efed5f759348895e3133f/B in TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
2024-11-22T15:22:18,983 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/eeb965a4e2694a459762d877fe40a9e3, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/cc7a273680594f9cbf2baf77a67d235a, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/777c6d4e2f7b437bb85de796bc164ab9] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp, totalSize=37.1 K 2024-11-22T15:22:18,983 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 18a95b04998e4cd9a2defab662b25301, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=439, earliestPutTs=1732288936010 2024-11-22T15:22:18,984 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting eeb965a4e2694a459762d877fe40a9e3, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=439, earliestPutTs=1732288936010 2024-11-22T15:22:18,984 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting dc67e058c89446d29d6a7bfb00bec074, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=452, earliestPutTs=1732288936377 2024-11-22T15:22:18,984 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting cc7a273680594f9cbf2baf77a67d235a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=452, earliestPutTs=1732288936377 2024-11-22T15:22:18,984 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 45b1c98712304ddf8ed1bcad2d11309e, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=480, earliestPutTs=1732288937136 2024-11-22T15:22:18,985 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 777c6d4e2f7b437bb85de796bc164ab9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=480, earliestPutTs=1732288937136 2024-11-22T15:22:19,013 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed6f777bba2efed5f759348895e3133f#B#compaction#102 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:19,013 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/0acb1538d08e4d1abedece939d768789 is 50, key is test_row_0/B:col10/1732288937136/Put/seqid=0 2024-11-22T15:22:19,033 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed6f777bba2efed5f759348895e3133f#A#compaction#103 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:19,034 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/02971d1af35349c1a7c0a8b6c152449e is 50, key is test_row_0/A:col10/1732288937136/Put/seqid=0 2024-11-22T15:22:19,052 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:19,057 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-22T15:22:19,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:19,058 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2837): Flushing ed6f777bba2efed5f759348895e3133f 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-22T15:22:19,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=A 2024-11-22T15:22:19,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:19,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=B 2024-11-22T15:22:19,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:19,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=C 2024-11-22T15:22:19,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:19,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741941_1117 (size=13459) 2024-11-22T15:22:19,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741942_1118 (size=13459) 2024-11-22T15:22:19,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/47f6fe33dadc4437b1c765ac4590a871 is 50, key is test_row_0/A:col10/1732288938328/Put/seqid=0 2024-11-22T15:22:19,111 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/0acb1538d08e4d1abedece939d768789 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/0acb1538d08e4d1abedece939d768789 2024-11-22T15:22:19,119 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed6f777bba2efed5f759348895e3133f/B of ed6f777bba2efed5f759348895e3133f into 0acb1538d08e4d1abedece939d768789(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:22:19,119 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:19,119 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., storeName=ed6f777bba2efed5f759348895e3133f/B, priority=13, startTime=1732288938980; duration=0sec 2024-11-22T15:22:19,119 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:22:19,119 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed6f777bba2efed5f759348895e3133f:B 2024-11-22T15:22:19,120 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:19,121 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37959 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:19,121 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): ed6f777bba2efed5f759348895e3133f/C is initiating minor compaction (all files) 2024-11-22T15:22:19,122 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed6f777bba2efed5f759348895e3133f/C in TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
2024-11-22T15:22:19,122 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/df5a33d6dccc4ebd929e38da34f21d49, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/66507102558c4d2a8bfe2a18da0fe789, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/a421cd99cf03463c83d714c526ba5947] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp, totalSize=37.1 K 2024-11-22T15:22:19,123 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting df5a33d6dccc4ebd929e38da34f21d49, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=439, earliestPutTs=1732288936010 2024-11-22T15:22:19,123 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 66507102558c4d2a8bfe2a18da0fe789, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=452, earliestPutTs=1732288936377 2024-11-22T15:22:19,124 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting a421cd99cf03463c83d714c526ba5947, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=480, earliestPutTs=1732288937136 2024-11-22T15:22:19,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741943_1119 (size=12301) 2024-11-22T15:22:19,153 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=491 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/47f6fe33dadc4437b1c765ac4590a871 2024-11-22T15:22:19,162 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed6f777bba2efed5f759348895e3133f#C#compaction#105 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:19,163 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/9f7d13aa597a4dc1b193b4a66bd21909 is 50, key is test_row_0/C:col10/1732288937136/Put/seqid=0 2024-11-22T15:22:19,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/8637015f862440d48e932665beac970c is 50, key is test_row_0/B:col10/1732288938328/Put/seqid=0 2024-11-22T15:22:19,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741944_1120 (size=13459) 2024-11-22T15:22:19,243 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/9f7d13aa597a4dc1b193b4a66bd21909 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/9f7d13aa597a4dc1b193b4a66bd21909 2024-11-22T15:22:19,256 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed6f777bba2efed5f759348895e3133f/C of ed6f777bba2efed5f759348895e3133f into 9f7d13aa597a4dc1b193b4a66bd21909(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
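Here the long-compaction thread finishes rewriting the three C-family files into 9f7d13aa597a4dc1b193b4a66bd21909; these compactions were queued automatically by the region server after the flushes. For comparison, a minimal sketch of requesting a compaction explicitly from a client is below. Admin#compact and Admin#majorCompact are the standard public API; the table and family names are simply the ones from this test run.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

// Minimal sketch: ask the cluster to compact one column family of the test table.
// Compaction requests are asynchronous; the server queues them much like the
// CompactSplit entries in the log above.
public class RequestCompaction {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            admin.compact(table, Bytes.toBytes("C"));   // minor compaction of family C
            // admin.majorCompact(table);               // or rewrite every store file
        }
    }
}
```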
2024-11-22T15:22:19,256 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:19,256 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., storeName=ed6f777bba2efed5f759348895e3133f/C, priority=13, startTime=1732288938980; duration=0sec 2024-11-22T15:22:19,256 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:19,256 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed6f777bba2efed5f759348895e3133f:C 2024-11-22T15:22:19,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741945_1121 (size=12301) 2024-11-22T15:22:19,263 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=491 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/8637015f862440d48e932665beac970c 2024-11-22T15:22:19,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/3f97ed1d2eca435abe80af72a27d8c04 is 50, key is test_row_0/C:col10/1732288938328/Put/seqid=0 2024-11-22T15:22:19,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741946_1122 (size=12301) 2024-11-22T15:22:19,318 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=491 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/3f97ed1d2eca435abe80af72a27d8c04 2024-11-22T15:22:19,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/47f6fe33dadc4437b1c765ac4590a871 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/47f6fe33dadc4437b1c765ac4590a871 2024-11-22T15:22:19,338 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/47f6fe33dadc4437b1c765ac4590a871, entries=150, sequenceid=491, filesize=12.0 K 2024-11-22T15:22:19,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 
{event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/8637015f862440d48e932665beac970c as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/8637015f862440d48e932665beac970c 2024-11-22T15:22:19,347 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/8637015f862440d48e932665beac970c, entries=150, sequenceid=491, filesize=12.0 K 2024-11-22T15:22:19,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/3f97ed1d2eca435abe80af72a27d8c04 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/3f97ed1d2eca435abe80af72a27d8c04 2024-11-22T15:22:19,363 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/3f97ed1d2eca435abe80af72a27d8c04, entries=150, sequenceid=491, filesize=12.0 K 2024-11-22T15:22:19,364 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=0 B/0 for ed6f777bba2efed5f759348895e3133f in 306ms, sequenceid=491, compaction requested=false 2024-11-22T15:22:19,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2538): Flush status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:19,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
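The "Committing .tmp/... as ..." lines show the flush writing each family's HFile under the region's .tmp directory and only then moving it into the live store directory, so readers never see a partially written file. A bare-bones sketch of that write-then-rename idea on HDFS follows; HRegionFileSystem does considerably more (validation, StoreFileInfo bookkeeping), and the paths here are shortened placeholders rather than real ones from this run.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of the commit step only: a finished file under .tmp is renamed into
// the store directory. A single-file rename is atomic on HDFS, so a reader
// never observes a half-written store file. Paths are placeholders.
public class CommitTmpFileSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        Path tmp = new Path("/hbase/data/default/TestAcidGuarantees/<region>/.tmp/B/<hfile>");
        Path dst = new Path("/hbase/data/default/TestAcidGuarantees/<region>/B/<hfile>");

        if (!fs.rename(tmp, dst)) {
            throw new java.io.IOException("rename failed: " + tmp + " -> " + dst);
        }
    }
}
```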
2024-11-22T15:22:19,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=27 2024-11-22T15:22:19,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=27 2024-11-22T15:22:19,367 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-11-22T15:22:19,367 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.6190 sec 2024-11-22T15:22:19,372 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees in 3.6260 sec 2024-11-22T15:22:19,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed6f777bba2efed5f759348895e3133f 2024-11-22T15:22:19,500 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed6f777bba2efed5f759348895e3133f 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-22T15:22:19,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=A 2024-11-22T15:22:19,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:19,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=B 2024-11-22T15:22:19,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:19,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=C 2024-11-22T15:22:19,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:19,514 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/93daaf6a55ef4d0b8ae496ac2f6b4bee is 50, key is test_row_0/A:col10/1732288939498/Put/seqid=0 2024-11-22T15:22:19,515 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/02971d1af35349c1a7c0a8b6c152449e as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/02971d1af35349c1a7c0a8b6c152449e 2024-11-22T15:22:19,523 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed6f777bba2efed5f759348895e3133f/A of ed6f777bba2efed5f759348895e3133f into 02971d1af35349c1a7c0a8b6c152449e(size=13.1 K), total size for store is 25.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
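At this point pid=27 reports back to the master and the enclosing FlushTableProcedure (pid=26) finishes in ~3.6 s; immediately afterwards MemStoreFlusher starts another flush on its own because writes keep arriving. The table-level flush that created pid=26 was requested by the test client; a minimal sketch of issuing the same request is below (Admin#flush is the public API, and the table name is taken from the log).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Minimal sketch: ask the master to flush every region of the test table.
// On the server this becomes a FlushTableProcedure with one
// FlushRegionProcedure per region, as seen in the log (pid=26 / pid=27).
public class RequestTableFlush {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```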
2024-11-22T15:22:19,523 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:19,523 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., storeName=ed6f777bba2efed5f759348895e3133f/A, priority=13, startTime=1732288938980; duration=0sec 2024-11-22T15:22:19,523 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:19,523 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed6f777bba2efed5f759348895e3133f:A 2024-11-22T15:22:19,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741947_1123 (size=17181) 2024-11-22T15:22:19,573 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=505 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/93daaf6a55ef4d0b8ae496ac2f6b4bee 2024-11-22T15:22:19,604 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/263fab70748b4c5a93a0482c7a2f2144 is 50, key is test_row_0/B:col10/1732288939498/Put/seqid=0 2024-11-22T15:22:19,613 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:19,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732288999609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:19,616 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:19,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732288999611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:19,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741948_1124 (size=12301) 2024-11-22T15:22:19,716 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:19,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732288999716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:19,730 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:19,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732288999726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:19,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-22T15:22:19,855 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-11-22T15:22:19,864 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:22:19,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees 2024-11-22T15:22:19,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-22T15:22:19,865 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:22:19,866 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:22:19,866 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:22:19,920 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:19,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732288999919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:19,937 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:19,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732288999935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:19,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-22T15:22:20,019 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:20,020 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-22T15:22:20,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:20,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:20,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:20,021 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
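The warnings above are the write path hitting the per-region blocking limit ("Over memstore limit=512.0 K"): HRegion.checkResources rejects new mutations until a flush frees memstore space, and the RPC layer surfaces RegionTooBusyException to the caller; the pid=29 flush callable also fails here simply because a flush is already in progress and the master will retry it. The stock HBase client treats RegionTooBusyException as retriable and backs off automatically, so the sketch below only illustrates what a hand-rolled retry around Table#put would look like; the row/family/qualifier come from the test data and the backoff values are arbitrary.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative only: retry a single Put when the region reports it is over its
// memstore limit. The normal client already retries RegionTooBusyException
// internally, so this pattern is not needed in practice.
public class PutWithRetrySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {

            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            long backoffMs = 100;                       // arbitrary starting backoff
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);
                    break;                              // write accepted
                } catch (RegionTooBusyException busy) {
                    if (attempt >= 5) throw busy;       // give up after a few tries
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;                     // back off while the flush catches up
                }
            }
        }
    }
}
```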
2024-11-22T15:22:20,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:20,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:20,052 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=505 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/263fab70748b4c5a93a0482c7a2f2144 2024-11-22T15:22:20,069 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/ca730382bd564cd490a7ab5a116475b5 is 50, key is test_row_0/C:col10/1732288939498/Put/seqid=0 2024-11-22T15:22:20,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741949_1125 (size=12301) 2024-11-22T15:22:20,100 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=505 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/ca730382bd564cd490a7ab5a116475b5 2024-11-22T15:22:20,115 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/93daaf6a55ef4d0b8ae496ac2f6b4bee as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/93daaf6a55ef4d0b8ae496ac2f6b4bee 2024-11-22T15:22:20,124 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/93daaf6a55ef4d0b8ae496ac2f6b4bee, entries=250, sequenceid=505, filesize=16.8 K 2024-11-22T15:22:20,127 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/263fab70748b4c5a93a0482c7a2f2144 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/263fab70748b4c5a93a0482c7a2f2144 2024-11-22T15:22:20,136 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/263fab70748b4c5a93a0482c7a2f2144, entries=150, sequenceid=505, filesize=12.0 K 2024-11-22T15:22:20,140 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/ca730382bd564cd490a7ab5a116475b5 as 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/ca730382bd564cd490a7ab5a116475b5 2024-11-22T15:22:20,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-22T15:22:20,171 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/ca730382bd564cd490a7ab5a116475b5, entries=150, sequenceid=505, filesize=12.0 K 2024-11-22T15:22:20,172 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for ed6f777bba2efed5f759348895e3133f in 672ms, sequenceid=505, compaction requested=true 2024-11-22T15:22:20,173 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:20,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed6f777bba2efed5f759348895e3133f:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:22:20,173 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:20,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:20,173 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:20,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed6f777bba2efed5f759348895e3133f:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:22:20,174 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:20,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:20,175 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-22T15:22:20,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
2024-11-22T15:22:20,176 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2837): Flushing ed6f777bba2efed5f759348895e3133f 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-22T15:22:20,176 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38061 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:20,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed6f777bba2efed5f759348895e3133f:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:22:20,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=A 2024-11-22T15:22:20,176 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42941 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:20,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:22:20,176 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): ed6f777bba2efed5f759348895e3133f/B is initiating minor compaction (all files) 2024-11-22T15:22:20,176 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): ed6f777bba2efed5f759348895e3133f/A is initiating minor compaction (all files) 2024-11-22T15:22:20,176 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed6f777bba2efed5f759348895e3133f/B in TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:20,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:20,176 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed6f777bba2efed5f759348895e3133f/A in TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
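The selection lines here ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking") and the earlier 512 K memstore limit are governed by a handful of size thresholds. The sketch below sets the commonly documented knobs on a Configuration object; the values are examples only (a tiny flush size is the kind of override a stress test like TestAcidGuarantees might use, not a production setting), and while 128 KB x 4 happens to work out to the 512 KB limit seen in the exception, that is inference rather than something the log states.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// The knobs that shape what the log shows: when a memstore flush is cut, when
// writes start blocking ("Over memstore limit"), how many store files trigger
// a compaction, and when store files become "blocking". Values are illustrative.
public class FlushCompactionKnobs {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L); // flush a memstore at 128 KB
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // block writes at 4x the flush size
        conf.setInt("hbase.hstore.compactionThreshold", 3);             // consider compaction at 3+ files per store
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);             // the "16 blocking" in the log

        System.out.println(conf.get("hbase.hregion.memstore.flush.size"));
    }
}
```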
2024-11-22T15:22:20,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=B 2024-11-22T15:22:20,176 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/0acb1538d08e4d1abedece939d768789, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/8637015f862440d48e932665beac970c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/263fab70748b4c5a93a0482c7a2f2144] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp, totalSize=37.2 K 2024-11-22T15:22:20,176 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/02971d1af35349c1a7c0a8b6c152449e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/47f6fe33dadc4437b1c765ac4590a871, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/93daaf6a55ef4d0b8ae496ac2f6b4bee] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp, totalSize=41.9 K 2024-11-22T15:22:20,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:20,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=C 2024-11-22T15:22:20,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:20,177 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 0acb1538d08e4d1abedece939d768789, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=480, earliestPutTs=1732288937136 2024-11-22T15:22:20,177 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 02971d1af35349c1a7c0a8b6c152449e, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=480, earliestPutTs=1732288937136 2024-11-22T15:22:20,177 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 8637015f862440d48e932665beac970c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=491, earliestPutTs=1732288938328 2024-11-22T15:22:20,178 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 47f6fe33dadc4437b1c765ac4590a871, keycount=150, 
bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=491, earliestPutTs=1732288938328 2024-11-22T15:22:20,178 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 93daaf6a55ef4d0b8ae496ac2f6b4bee, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=505, earliestPutTs=1732288939487 2024-11-22T15:22:20,178 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 263fab70748b4c5a93a0482c7a2f2144, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=505, earliestPutTs=1732288939487 2024-11-22T15:22:20,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/2ea0e878ac3843b7845f3fb20f9a942e is 50, key is test_row_0/A:col10/1732288939608/Put/seqid=0 2024-11-22T15:22:20,204 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed6f777bba2efed5f759348895e3133f#B#compaction#112 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:20,205 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/ca847b60e4564753a1a5b42f909384de is 50, key is test_row_0/B:col10/1732288939498/Put/seqid=0 2024-11-22T15:22:20,216 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed6f777bba2efed5f759348895e3133f#A#compaction#113 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:20,217 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/0af260cbebbe41ccab8cb007f9ccfb62 is 50, key is test_row_0/A:col10/1732288939498/Put/seqid=0 2024-11-22T15:22:20,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed6f777bba2efed5f759348895e3133f 2024-11-22T15:22:20,236 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
as already flushing 2024-11-22T15:22:20,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741950_1126 (size=12301) 2024-11-22T15:22:20,255 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=530 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/2ea0e878ac3843b7845f3fb20f9a942e 2024-11-22T15:22:20,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741951_1127 (size=13561) 2024-11-22T15:22:20,286 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:20,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732289000278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:20,287 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:20,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732289000264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:20,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741952_1128 (size=13561) 2024-11-22T15:22:20,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/b432a12ef1fd4988a6301a68d87ee5a0 is 50, key is test_row_0/B:col10/1732288939608/Put/seqid=0 2024-11-22T15:22:20,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741953_1129 (size=12301) 2024-11-22T15:22:20,343 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=530 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/b432a12ef1fd4988a6301a68d87ee5a0 2024-11-22T15:22:20,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/11fb54a59af2484e860050b1bec6c033 is 50, key is test_row_0/C:col10/1732288939608/Put/seqid=0 2024-11-22T15:22:20,395 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:20,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732289000389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:20,396 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:20,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732289000389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:20,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741954_1130 (size=12301) 2024-11-22T15:22:20,398 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=530 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/11fb54a59af2484e860050b1bec6c033 2024-11-22T15:22:20,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/2ea0e878ac3843b7845f3fb20f9a942e as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/2ea0e878ac3843b7845f3fb20f9a942e 2024-11-22T15:22:20,410 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/2ea0e878ac3843b7845f3fb20f9a942e, entries=150, sequenceid=530, filesize=12.0 K 2024-11-22T15:22:20,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/b432a12ef1fd4988a6301a68d87ee5a0 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/b432a12ef1fd4988a6301a68d87ee5a0 2024-11-22T15:22:20,423 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/b432a12ef1fd4988a6301a68d87ee5a0, entries=150, sequenceid=530, filesize=12.0 K 2024-11-22T15:22:20,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/11fb54a59af2484e860050b1bec6c033 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/11fb54a59af2484e860050b1bec6c033 2024-11-22T15:22:20,431 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/11fb54a59af2484e860050b1bec6c033, entries=150, sequenceid=530, filesize=12.0 K 2024-11-22T15:22:20,432 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for ed6f777bba2efed5f759348895e3133f in 257ms, sequenceid=530, compaction requested=true 2024-11-22T15:22:20,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2538): Flush status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:20,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:20,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-11-22T15:22:20,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=29 2024-11-22T15:22:20,437 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=29, resume processing ppid=28 2024-11-22T15:22:20,437 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, ppid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 569 msec 2024-11-22T15:22:20,443 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees in 574 msec 2024-11-22T15:22:20,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-22T15:22:20,469 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 28 completed 2024-11-22T15:22:20,470 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:22:20,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees 2024-11-22T15:22:20,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-22T15:22:20,474 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): 
pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:22:20,474 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:22:20,474 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:22:20,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-22T15:22:20,610 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed6f777bba2efed5f759348895e3133f 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-22T15:22:20,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=A 2024-11-22T15:22:20,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:20,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=B 2024-11-22T15:22:20,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:20,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=C 2024-11-22T15:22:20,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:20,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed6f777bba2efed5f759348895e3133f 2024-11-22T15:22:20,618 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/eb01c609eb594abfb5e8a3fad1c0a067 is 50, key is test_row_0/A:col10/1732288940252/Put/seqid=0 2024-11-22T15:22:20,625 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:20,625 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-22T15:22:20,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:20,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
as already flushing 2024-11-22T15:22:20,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:20,626 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:20,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:20,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:20,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741955_1131 (size=14741) 2024-11-22T15:22:20,650 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=543 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/eb01c609eb594abfb5e8a3fad1c0a067 2024-11-22T15:22:20,669 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/87ab3d53b8e544d0a09329640466e7a6 is 50, key is test_row_0/B:col10/1732288940252/Put/seqid=0 2024-11-22T15:22:20,691 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:20,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732289000687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:20,694 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/ca847b60e4564753a1a5b42f909384de as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/ca847b60e4564753a1a5b42f909384de 2024-11-22T15:22:20,699 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:20,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732289000689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:20,702 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed6f777bba2efed5f759348895e3133f/B of ed6f777bba2efed5f759348895e3133f into ca847b60e4564753a1a5b42f909384de(size=13.2 K), total size for store is 25.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:22:20,703 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/0af260cbebbe41ccab8cb007f9ccfb62 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/0af260cbebbe41ccab8cb007f9ccfb62 2024-11-22T15:22:20,705 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:20,705 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., storeName=ed6f777bba2efed5f759348895e3133f/B, priority=13, startTime=1732288940173; duration=0sec 2024-11-22T15:22:20,706 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:22:20,706 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed6f777bba2efed5f759348895e3133f:B 2024-11-22T15:22:20,706 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T15:22:20,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741956_1132 (size=12301) 2024-11-22T15:22:20,711 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=543 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/87ab3d53b8e544d0a09329640466e7a6 2024-11-22T15:22:20,711 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): 
Completed compaction of 3 (all) file(s) in ed6f777bba2efed5f759348895e3133f/A of ed6f777bba2efed5f759348895e3133f into 0af260cbebbe41ccab8cb007f9ccfb62(size=13.2 K), total size for store is 25.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:22:20,712 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:20,712 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., storeName=ed6f777bba2efed5f759348895e3133f/A, priority=13, startTime=1732288940173; duration=0sec 2024-11-22T15:22:20,712 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:20,712 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed6f777bba2efed5f759348895e3133f:A 2024-11-22T15:22:20,712 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50362 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T15:22:20,712 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): ed6f777bba2efed5f759348895e3133f/C is initiating minor compaction (all files) 2024-11-22T15:22:20,712 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed6f777bba2efed5f759348895e3133f/C in TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
2024-11-22T15:22:20,713 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/9f7d13aa597a4dc1b193b4a66bd21909, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/3f97ed1d2eca435abe80af72a27d8c04, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/ca730382bd564cd490a7ab5a116475b5, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/11fb54a59af2484e860050b1bec6c033] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp, totalSize=49.2 K 2024-11-22T15:22:20,713 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 9f7d13aa597a4dc1b193b4a66bd21909, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=480, earliestPutTs=1732288937136 2024-11-22T15:22:20,717 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 3f97ed1d2eca435abe80af72a27d8c04, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=491, earliestPutTs=1732288938328 2024-11-22T15:22:20,718 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting ca730382bd564cd490a7ab5a116475b5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=505, earliestPutTs=1732288939487 2024-11-22T15:22:20,739 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 11fb54a59af2484e860050b1bec6c033, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=530, earliestPutTs=1732288939599 2024-11-22T15:22:20,745 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/e0a2286047864900a64e781a448c762c is 50, key is test_row_0/C:col10/1732288940252/Put/seqid=0 2024-11-22T15:22:20,759 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed6f777bba2efed5f759348895e3133f#C#compaction#119 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:20,760 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/9e700e290e864229878ddc7bcd9c5c08 is 50, key is test_row_0/C:col10/1732288939608/Put/seqid=0 2024-11-22T15:22:20,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-22T15:22:20,783 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:20,783 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-22T15:22:20,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:20,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:20,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:20,784 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:20,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:20,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:22:20,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741957_1133 (size=12301) 2024-11-22T15:22:20,794 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=543 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/e0a2286047864900a64e781a448c762c 2024-11-22T15:22:20,799 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:20,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732289000793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:20,802 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/eb01c609eb594abfb5e8a3fad1c0a067 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/eb01c609eb594abfb5e8a3fad1c0a067 2024-11-22T15:22:20,803 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:20,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732289000801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:20,809 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/eb01c609eb594abfb5e8a3fad1c0a067, entries=200, sequenceid=543, filesize=14.4 K 2024-11-22T15:22:20,811 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/87ab3d53b8e544d0a09329640466e7a6 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/87ab3d53b8e544d0a09329640466e7a6 2024-11-22T15:22:20,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741958_1134 (size=13595) 2024-11-22T15:22:20,834 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/87ab3d53b8e544d0a09329640466e7a6, entries=150, sequenceid=543, filesize=12.0 K 2024-11-22T15:22:20,836 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/e0a2286047864900a64e781a448c762c as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/e0a2286047864900a64e781a448c762c 2024-11-22T15:22:20,848 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/e0a2286047864900a64e781a448c762c, entries=150, sequenceid=543, filesize=12.0 K 2024-11-22T15:22:20,849 DEBUG 
[RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/9e700e290e864229878ddc7bcd9c5c08 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/9e700e290e864229878ddc7bcd9c5c08 2024-11-22T15:22:20,851 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for ed6f777bba2efed5f759348895e3133f in 240ms, sequenceid=543, compaction requested=true 2024-11-22T15:22:20,851 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:20,851 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:20,851 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed6f777bba2efed5f759348895e3133f:A, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:22:20,851 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:20,851 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed6f777bba2efed5f759348895e3133f:B, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:22:20,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:22:20,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed6f777bba2efed5f759348895e3133f:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:22:20,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-22T15:22:20,853 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40603 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:20,853 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): ed6f777bba2efed5f759348895e3133f/A is initiating minor compaction (all files) 2024-11-22T15:22:20,853 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed6f777bba2efed5f759348895e3133f/A in TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
2024-11-22T15:22:20,853 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/0af260cbebbe41ccab8cb007f9ccfb62, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/2ea0e878ac3843b7845f3fb20f9a942e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/eb01c609eb594abfb5e8a3fad1c0a067] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp, totalSize=39.7 K 2024-11-22T15:22:20,854 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0af260cbebbe41ccab8cb007f9ccfb62, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=505, earliestPutTs=1732288939487 2024-11-22T15:22:20,856 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2ea0e878ac3843b7845f3fb20f9a942e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=530, earliestPutTs=1732288939599 2024-11-22T15:22:20,857 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting eb01c609eb594abfb5e8a3fad1c0a067, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=543, earliestPutTs=1732288940252 2024-11-22T15:22:20,862 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ed6f777bba2efed5f759348895e3133f/C of ed6f777bba2efed5f759348895e3133f into 9e700e290e864229878ddc7bcd9c5c08(size=13.3 K), total size for store is 25.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:22:20,862 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:20,862 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., storeName=ed6f777bba2efed5f759348895e3133f/C, priority=12, startTime=1732288940174; duration=0sec 2024-11-22T15:22:20,863 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-22T15:22:20,863 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed6f777bba2efed5f759348895e3133f:C 2024-11-22T15:22:20,863 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed6f777bba2efed5f759348895e3133f:C 2024-11-22T15:22:20,863 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:20,864 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38163 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:20,865 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): ed6f777bba2efed5f759348895e3133f/B is initiating minor compaction (all files) 2024-11-22T15:22:20,865 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed6f777bba2efed5f759348895e3133f/B in TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
2024-11-22T15:22:20,865 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/ca847b60e4564753a1a5b42f909384de, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/b432a12ef1fd4988a6301a68d87ee5a0, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/87ab3d53b8e544d0a09329640466e7a6] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp, totalSize=37.3 K 2024-11-22T15:22:20,866 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting ca847b60e4564753a1a5b42f909384de, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=505, earliestPutTs=1732288939487 2024-11-22T15:22:20,871 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting b432a12ef1fd4988a6301a68d87ee5a0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=530, earliestPutTs=1732288939599 2024-11-22T15:22:20,871 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 87ab3d53b8e544d0a09329640466e7a6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=543, earliestPutTs=1732288940252 2024-11-22T15:22:20,888 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed6f777bba2efed5f759348895e3133f#A#compaction#120 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:20,889 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/03278d8da614424e98689f6fb81cf66a is 50, key is test_row_0/A:col10/1732288940252/Put/seqid=0 2024-11-22T15:22:20,895 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed6f777bba2efed5f759348895e3133f#B#compaction#121 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:20,896 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/e996ccd447674810bacfae68c60b1c72 is 50, key is test_row_0/B:col10/1732288940252/Put/seqid=0 2024-11-22T15:22:20,941 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:20,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741959_1135 (size=13663) 2024-11-22T15:22:20,942 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-22T15:22:20,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:20,942 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2837): Flushing ed6f777bba2efed5f759348895e3133f 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-22T15:22:20,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=A 2024-11-22T15:22:20,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:20,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=B 2024-11-22T15:22:20,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:20,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=C 2024-11-22T15:22:20,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:20,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741960_1136 (size=13663) 2024-11-22T15:22:20,953 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/03278d8da614424e98689f6fb81cf66a as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/03278d8da614424e98689f6fb81cf66a 2024-11-22T15:22:20,957 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/e996ccd447674810bacfae68c60b1c72 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/e996ccd447674810bacfae68c60b1c72 2024-11-22T15:22:20,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/ab7d410000ec4154ad60e5a60daa5441 is 50, key is test_row_0/A:col10/1732288940676/Put/seqid=0 2024-11-22T15:22:20,969 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed6f777bba2efed5f759348895e3133f/A of ed6f777bba2efed5f759348895e3133f into 03278d8da614424e98689f6fb81cf66a(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:22:20,969 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:20,970 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., storeName=ed6f777bba2efed5f759348895e3133f/A, priority=13, startTime=1732288940851; duration=0sec 2024-11-22T15:22:20,970 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:22:20,970 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed6f777bba2efed5f759348895e3133f:A 2024-11-22T15:22:20,970 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-22T15:22:20,971 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-22T15:22:20,971 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-22T15:22:20,971 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. because compaction request was cancelled 2024-11-22T15:22:20,971 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed6f777bba2efed5f759348895e3133f:C 2024-11-22T15:22:20,977 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed6f777bba2efed5f759348895e3133f/B of ed6f777bba2efed5f759348895e3133f into e996ccd447674810bacfae68c60b1c72(size=13.3 K), total size for store is 13.3 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:22:20,977 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:20,977 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., storeName=ed6f777bba2efed5f759348895e3133f/B, priority=13, startTime=1732288940851; duration=0sec 2024-11-22T15:22:20,977 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:20,977 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed6f777bba2efed5f759348895e3133f:B 2024-11-22T15:22:20,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741961_1137 (size=12301) 2024-11-22T15:22:20,999 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=569 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/ab7d410000ec4154ad60e5a60daa5441 2024-11-22T15:22:21,012 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:21,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed6f777bba2efed5f759348895e3133f 2024-11-22T15:22:21,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/4bfee0ad1fa24554a6a10c4915589ebe is 50, key is test_row_0/B:col10/1732288940676/Put/seqid=0 2024-11-22T15:22:21,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741962_1138 (size=12301) 2024-11-22T15:22:21,064 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:21,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732289001058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:21,068 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:21,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732289001064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:21,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-22T15:22:21,172 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:21,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732289001167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:21,172 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:21,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732289001169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:21,375 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:21,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732289001375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:21,378 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:21,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732289001375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:21,465 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=569 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/4bfee0ad1fa24554a6a10c4915589ebe 2024-11-22T15:22:21,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/15a65b1a524e4851819cd00d7ed59faa is 50, key is test_row_0/C:col10/1732288940676/Put/seqid=0 2024-11-22T15:22:21,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741963_1139 (size=12301) 2024-11-22T15:22:21,503 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=569 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/15a65b1a524e4851819cd00d7ed59faa 2024-11-22T15:22:21,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/ab7d410000ec4154ad60e5a60daa5441 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/ab7d410000ec4154ad60e5a60daa5441 2024-11-22T15:22:21,529 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/ab7d410000ec4154ad60e5a60daa5441, entries=150, sequenceid=569, filesize=12.0 K 2024-11-22T15:22:21,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/4bfee0ad1fa24554a6a10c4915589ebe as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/4bfee0ad1fa24554a6a10c4915589ebe 2024-11-22T15:22:21,538 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/4bfee0ad1fa24554a6a10c4915589ebe, entries=150, sequenceid=569, filesize=12.0 K 2024-11-22T15:22:21,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/15a65b1a524e4851819cd00d7ed59faa as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/15a65b1a524e4851819cd00d7ed59faa 2024-11-22T15:22:21,554 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/15a65b1a524e4851819cd00d7ed59faa, entries=150, sequenceid=569, filesize=12.0 K 2024-11-22T15:22:21,556 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for ed6f777bba2efed5f759348895e3133f in 614ms, sequenceid=569, compaction requested=true 2024-11-22T15:22:21,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2538): Flush status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:21,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
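The flush that finishes in the entries above was driven by a client-side flush request (the FlushTableProcedure/FlushRegionProcedure pair, procId 30/31, visible in the surrounding records). As a minimal sketch of issuing such a flush through the HBase Admin API, assuming an open cluster connection; the class and variable names below are illustrative and not taken from the test source:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushExample {
      public static void main(String[] args) throws Exception {
        // Illustrative only: connects with the default configuration on the classpath.
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Requests a table flush and waits for the master-side procedure to finish,
          // corresponding to the "Operation: FLUSH, Table Name: default:TestAcidGuarantees"
          // records in this log.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }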
2024-11-22T15:22:21,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=31 2024-11-22T15:22:21,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=31 2024-11-22T15:22:21,560 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-11-22T15:22:21,560 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0840 sec 2024-11-22T15:22:21,564 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees in 1.0910 sec 2024-11-22T15:22:21,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-22T15:22:21,577 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 30 completed 2024-11-22T15:22:21,579 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:22:21,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=32, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees 2024-11-22T15:22:21,583 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=32, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:22:21,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-22T15:22:21,585 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=32, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:22:21,585 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:22:21,683 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed6f777bba2efed5f759348895e3133f 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-22T15:22:21,684 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=A 2024-11-22T15:22:21,684 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:21,684 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=B 2024-11-22T15:22:21,684 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:21,684 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=C 2024-11-22T15:22:21,684 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:21,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed6f777bba2efed5f759348895e3133f 2024-11-22T15:22:21,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-22T15:22:21,704 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/0cf6963eec10474fafd571324ef84494 is 50, key is test_row_0/A:col10/1732288941030/Put/seqid=0 2024-11-22T15:22:21,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741964_1140 (size=14741) 2024-11-22T15:22:21,717 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=586 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/0cf6963eec10474fafd571324ef84494 2024-11-22T15:22:21,728 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/199f332ceb1c495ba8d77abc5a62b7a3 is 50, key is test_row_0/B:col10/1732288941030/Put/seqid=0 2024-11-22T15:22:21,736 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:21,737 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-22T15:22:21,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:21,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:21,737 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:21,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:21,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732289001732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:21,737 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:21,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:21,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:21,739 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:21,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732289001737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:21,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741965_1141 (size=12301) 2024-11-22T15:22:21,839 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:21,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732289001839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:21,842 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:21,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732289001842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:21,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-22T15:22:21,890 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:21,890 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-22T15:22:21,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:21,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:21,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:21,891 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:21,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:21,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:22,045 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:22,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732289002042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:22,047 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:22,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732289002045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:22,048 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:22,049 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-22T15:22:22,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
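The repeated RegionTooBusyException warnings above come from HRegion.checkResources once the region's memstore passes its blocking limit (512.0 K in this run), so writers are expected to back off and retry. A hedged sketch of a client-side retry loop around Table.put under that assumption; the retry count, sleep times, and helper name are illustrative, and in normal use the HBase client retries this exception internally before it reaches application code:

    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;

    public final class BackoffPut {
      // Retries a single Put with exponential backoff while the region reports
      // it is over its memstore blocking limit (RegionTooBusyException).
      static void putWithBackoff(Connection conn, Put put) throws Exception {
        try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          long sleepMs = 100;
          for (int attempt = 0; ; attempt++) {
            try {
              table.put(put);
              return;
            } catch (RegionTooBusyException e) {
              if (attempt >= 5) {
                throw e; // give up after a bounded number of attempts
              }
              Thread.sleep(sleepMs);
              sleepMs = Math.min(sleepMs * 2, 2000); // capped exponential backoff
            }
          }
        }
      }
    }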
2024-11-22T15:22:22,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:22,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:22,049 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:22,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:22:22,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:22:22,180 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=586 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/199f332ceb1c495ba8d77abc5a62b7a3 2024-11-22T15:22:22,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-22T15:22:22,208 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/6f7381fe331c4d0c82b31a34fdea6b02 is 50, key is test_row_0/C:col10/1732288941030/Put/seqid=0 2024-11-22T15:22:22,211 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:22,216 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-22T15:22:22,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:22,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:22,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:22,217 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:22:22,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:22,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:22,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741966_1142 (size=12301) 2024-11-22T15:22:22,252 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=586 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/6f7381fe331c4d0c82b31a34fdea6b02 2024-11-22T15:22:22,268 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/0cf6963eec10474fafd571324ef84494 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/0cf6963eec10474fafd571324ef84494 2024-11-22T15:22:22,274 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/0cf6963eec10474fafd571324ef84494, entries=200, sequenceid=586, filesize=14.4 K 2024-11-22T15:22:22,275 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/199f332ceb1c495ba8d77abc5a62b7a3 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/199f332ceb1c495ba8d77abc5a62b7a3 2024-11-22T15:22:22,289 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/199f332ceb1c495ba8d77abc5a62b7a3, entries=150, sequenceid=586, filesize=12.0 K 2024-11-22T15:22:22,293 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/6f7381fe331c4d0c82b31a34fdea6b02 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/6f7381fe331c4d0c82b31a34fdea6b02 2024-11-22T15:22:22,301 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/6f7381fe331c4d0c82b31a34fdea6b02, entries=150, sequenceid=586, filesize=12.0 K 2024-11-22T15:22:22,302 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for ed6f777bba2efed5f759348895e3133f in 619ms, sequenceid=586, compaction requested=true 2024-11-22T15:22:22,302 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:22,304 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed6f777bba2efed5f759348895e3133f:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:22:22,304 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:22,304 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:22,304 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed6f777bba2efed5f759348895e3133f:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:22:22,304 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:22:22,304 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed6f777bba2efed5f759348895e3133f:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:22:22,304 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-22T15:22:22,304 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T15:22:22,306 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40705 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:22,306 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): ed6f777bba2efed5f759348895e3133f/A is initiating minor compaction (all files) 2024-11-22T15:22:22,306 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed6f777bba2efed5f759348895e3133f/A in TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
2024-11-22T15:22:22,307 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/03278d8da614424e98689f6fb81cf66a, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/ab7d410000ec4154ad60e5a60daa5441, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/0cf6963eec10474fafd571324ef84494] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp, totalSize=39.8 K 2024-11-22T15:22:22,307 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 03278d8da614424e98689f6fb81cf66a, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=543, earliestPutTs=1732288940252 2024-11-22T15:22:22,308 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50498 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T15:22:22,308 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): ed6f777bba2efed5f759348895e3133f/C is initiating minor compaction (all files) 2024-11-22T15:22:22,308 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed6f777bba2efed5f759348895e3133f/C in TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
2024-11-22T15:22:22,308 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/9e700e290e864229878ddc7bcd9c5c08, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/e0a2286047864900a64e781a448c762c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/15a65b1a524e4851819cd00d7ed59faa, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/6f7381fe331c4d0c82b31a34fdea6b02] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp, totalSize=49.3 K 2024-11-22T15:22:22,308 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9e700e290e864229878ddc7bcd9c5c08, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=530, earliestPutTs=1732288939599 2024-11-22T15:22:22,308 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting ab7d410000ec4154ad60e5a60daa5441, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=569, earliestPutTs=1732288940660 2024-11-22T15:22:22,309 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting e0a2286047864900a64e781a448c762c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=543, earliestPutTs=1732288940252 2024-11-22T15:22:22,309 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 0cf6963eec10474fafd571324ef84494, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=586, earliestPutTs=1732288941030 2024-11-22T15:22:22,310 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 15a65b1a524e4851819cd00d7ed59faa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=569, earliestPutTs=1732288940660 2024-11-22T15:22:22,311 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6f7381fe331c4d0c82b31a34fdea6b02, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=586, earliestPutTs=1732288941030 2024-11-22T15:22:22,329 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed6f777bba2efed5f759348895e3133f#A#compaction#128 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:22,329 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/47c7acd3fb2640538251548771f2935c is 50, key is test_row_0/A:col10/1732288941030/Put/seqid=0 2024-11-22T15:22:22,333 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed6f777bba2efed5f759348895e3133f#C#compaction#129 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:22,333 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/dbed00b94a0243ffbfc124b4e3feeb2e is 50, key is test_row_0/C:col10/1732288941030/Put/seqid=0 2024-11-22T15:22:22,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed6f777bba2efed5f759348895e3133f 2024-11-22T15:22:22,360 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed6f777bba2efed5f759348895e3133f 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-22T15:22:22,360 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=A 2024-11-22T15:22:22,360 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:22,360 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=B 2024-11-22T15:22:22,360 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:22,360 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=C 2024-11-22T15:22:22,360 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:22,369 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:22,370 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-22T15:22:22,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:22,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:22,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
2024-11-22T15:22:22,370 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:22,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:22,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:22,376 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/4368385d64b4427193a9ac28631b4bfc is 50, key is test_row_0/A:col10/1732288942354/Put/seqid=0 2024-11-22T15:22:22,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741967_1143 (size=13765) 2024-11-22T15:22:22,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741968_1144 (size=13731) 2024-11-22T15:22:22,393 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/47c7acd3fb2640538251548771f2935c as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/47c7acd3fb2640538251548771f2935c 2024-11-22T15:22:22,399 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/dbed00b94a0243ffbfc124b4e3feeb2e as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/dbed00b94a0243ffbfc124b4e3feeb2e 2024-11-22T15:22:22,402 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed6f777bba2efed5f759348895e3133f/A of ed6f777bba2efed5f759348895e3133f into 
47c7acd3fb2640538251548771f2935c(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:22:22,402 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:22,402 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., storeName=ed6f777bba2efed5f759348895e3133f/A, priority=13, startTime=1732288942304; duration=0sec 2024-11-22T15:22:22,403 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:22:22,403 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed6f777bba2efed5f759348895e3133f:A 2024-11-22T15:22:22,403 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:22,407 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ed6f777bba2efed5f759348895e3133f/C of ed6f777bba2efed5f759348895e3133f into dbed00b94a0243ffbfc124b4e3feeb2e(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:22:22,407 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:22,407 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., storeName=ed6f777bba2efed5f759348895e3133f/C, priority=12, startTime=1732288942304; duration=0sec 2024-11-22T15:22:22,408 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:22,408 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed6f777bba2efed5f759348895e3133f:C 2024-11-22T15:22:22,408 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38265 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:22,408 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): ed6f777bba2efed5f759348895e3133f/B is initiating minor compaction (all files) 2024-11-22T15:22:22,408 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed6f777bba2efed5f759348895e3133f/B in TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
2024-11-22T15:22:22,408 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/e996ccd447674810bacfae68c60b1c72, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/4bfee0ad1fa24554a6a10c4915589ebe, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/199f332ceb1c495ba8d77abc5a62b7a3] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp, totalSize=37.4 K 2024-11-22T15:22:22,409 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting e996ccd447674810bacfae68c60b1c72, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=543, earliestPutTs=1732288940252 2024-11-22T15:22:22,410 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 4bfee0ad1fa24554a6a10c4915589ebe, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=569, earliestPutTs=1732288940660 2024-11-22T15:22:22,410 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 199f332ceb1c495ba8d77abc5a62b7a3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=586, earliestPutTs=1732288941030 2024-11-22T15:22:22,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741969_1145 (size=17181) 2024-11-22T15:22:22,441 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=609 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/4368385d64b4427193a9ac28631b4bfc 2024-11-22T15:22:22,445 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:22,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732289002436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:22,446 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:22,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732289002440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:22,451 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed6f777bba2efed5f759348895e3133f#B#compaction#131 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:22,452 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/6a6131ebc903492baae8d99d1b3d6249 is 50, key is test_row_0/B:col10/1732288941030/Put/seqid=0 2024-11-22T15:22:22,469 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/7781856a1c4249e58ec492299b5243a2 is 50, key is test_row_0/B:col10/1732288942354/Put/seqid=0 2024-11-22T15:22:22,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741970_1146 (size=13765) 2024-11-22T15:22:22,523 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:22,524 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-22T15:22:22,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:22,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. as already flushing 2024-11-22T15:22:22,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:22,525 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:22:22,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:22,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:22,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741971_1147 (size=12301) 2024-11-22T15:22:22,535 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=609 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/7781856a1c4249e58ec492299b5243a2 2024-11-22T15:22:22,554 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:22,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732289002550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:22,560 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:22,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732289002559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:22,580 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/a315f0b7cdee4c0faa5d66ae3fc3acc0 is 50, key is test_row_0/C:col10/1732288942354/Put/seqid=0 2024-11-22T15:22:22,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741972_1148 (size=12301) 2024-11-22T15:22:22,613 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=609 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/a315f0b7cdee4c0faa5d66ae3fc3acc0 2024-11-22T15:22:22,619 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/4368385d64b4427193a9ac28631b4bfc as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/4368385d64b4427193a9ac28631b4bfc 2024-11-22T15:22:22,626 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/4368385d64b4427193a9ac28631b4bfc, entries=250, sequenceid=609, filesize=16.8 K 2024-11-22T15:22:22,628 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/7781856a1c4249e58ec492299b5243a2 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/7781856a1c4249e58ec492299b5243a2 2024-11-22T15:22:22,635 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/7781856a1c4249e58ec492299b5243a2, entries=150, sequenceid=609, 
filesize=12.0 K 2024-11-22T15:22:22,637 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/a315f0b7cdee4c0faa5d66ae3fc3acc0 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/a315f0b7cdee4c0faa5d66ae3fc3acc0 2024-11-22T15:22:22,651 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/a315f0b7cdee4c0faa5d66ae3fc3acc0, entries=150, sequenceid=609, filesize=12.0 K 2024-11-22T15:22:22,653 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for ed6f777bba2efed5f759348895e3133f in 293ms, sequenceid=609, compaction requested=false 2024-11-22T15:22:22,653 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:22,677 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:22,677 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-22T15:22:22,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
2024-11-22T15:22:22,678 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2837): Flushing ed6f777bba2efed5f759348895e3133f 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-22T15:22:22,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=A 2024-11-22T15:22:22,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:22,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=B 2024-11-22T15:22:22,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:22,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=C 2024-11-22T15:22:22,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:22,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-22T15:22:22,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/16dc56d2e9d74504b4127bbf4cabe00e is 50, key is test_row_0/A:col10/1732288942434/Put/seqid=0 2024-11-22T15:22:22,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741973_1149 (size=12301) 2024-11-22T15:22:22,739 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=624 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/16dc56d2e9d74504b4127bbf4cabe00e 2024-11-22T15:22:22,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/7d7a06f8dfa943dba707ccdae9f01892 is 50, key is test_row_0/B:col10/1732288942434/Put/seqid=0 2024-11-22T15:22:22,771 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
as already flushing 2024-11-22T15:22:22,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed6f777bba2efed5f759348895e3133f 2024-11-22T15:22:22,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741974_1150 (size=12301) 2024-11-22T15:22:22,874 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:22,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732289002870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:22,878 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:22,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732289002872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:22,898 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:22,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56558 deadline: 1732289002894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:22,899 DEBUG [Thread-151 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8211 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., hostname=77927f992d0b,36033,1732288915809, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T15:22:22,902 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:22,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56552 deadline: 1732289002898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:22,903 DEBUG [Thread-153 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8221 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., hostname=77927f992d0b,36033,1732288915809, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T15:22:22,928 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/6a6131ebc903492baae8d99d1b3d6249 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/6a6131ebc903492baae8d99d1b3d6249 2024-11-22T15:22:22,933 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x09ed28bb to 127.0.0.1:52970 2024-11-22T15:22:22,933 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:22:22,935 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:22,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56528 deadline: 1732289002935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:22,936 DEBUG [Thread-149 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8242 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., hostname=77927f992d0b,36033,1732288915809, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T15:22:22,937 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed6f777bba2efed5f759348895e3133f/B of ed6f777bba2efed5f759348895e3133f into 6a6131ebc903492baae8d99d1b3d6249(size=13.4 K), total size for store is 25.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:22:22,937 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x353bc462 to 127.0.0.1:52970 2024-11-22T15:22:22,937 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:22,937 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:22:22,937 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., storeName=ed6f777bba2efed5f759348895e3133f/B, priority=13, startTime=1732288942304; duration=0sec 2024-11-22T15:22:22,937 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:22,937 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed6f777bba2efed5f759348895e3133f:B 2024-11-22T15:22:22,938 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x12a1285d to 127.0.0.1:52970 2024-11-22T15:22:22,938 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:22:22,943 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x47fe2fa7 to 127.0.0.1:52970 2024-11-22T15:22:22,943 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:22:22,977 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:22,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732289002976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:22,983 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:22,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732289002983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:22,992 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-22T15:22:23,179 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:23,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 245 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56590 deadline: 1732289003178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:23,188 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:23,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56564 deadline: 1732289003188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:23,207 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=624 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/7d7a06f8dfa943dba707ccdae9f01892 2024-11-22T15:22:23,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/1e265f9f52e943ac82098e1a791c9eb9 is 50, key is test_row_0/C:col10/1732288942434/Put/seqid=0 2024-11-22T15:22:23,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741975_1151 (size=12301) 2024-11-22T15:22:23,251 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=624 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/1e265f9f52e943ac82098e1a791c9eb9 2024-11-22T15:22:23,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/16dc56d2e9d74504b4127bbf4cabe00e as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/16dc56d2e9d74504b4127bbf4cabe00e 2024-11-22T15:22:23,261 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/16dc56d2e9d74504b4127bbf4cabe00e, entries=150, sequenceid=624, filesize=12.0 K 2024-11-22T15:22:23,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/7d7a06f8dfa943dba707ccdae9f01892 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/7d7a06f8dfa943dba707ccdae9f01892 2024-11-22T15:22:23,272 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/7d7a06f8dfa943dba707ccdae9f01892, entries=150, sequenceid=624, filesize=12.0 K 2024-11-22T15:22:23,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/1e265f9f52e943ac82098e1a791c9eb9 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/1e265f9f52e943ac82098e1a791c9eb9 2024-11-22T15:22:23,281 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/1e265f9f52e943ac82098e1a791c9eb9, entries=150, sequenceid=624, filesize=12.0 K 2024-11-22T15:22:23,282 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for ed6f777bba2efed5f759348895e3133f in 605ms, sequenceid=624, compaction requested=true 2024-11-22T15:22:23,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2538): Flush status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:23,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
2024-11-22T15:22:23,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=33 2024-11-22T15:22:23,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=33 2024-11-22T15:22:23,284 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-11-22T15:22:23,284 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6980 sec 2024-11-22T15:22:23,286 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees in 1.7060 sec 2024-11-22T15:22:23,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed6f777bba2efed5f759348895e3133f 2024-11-22T15:22:23,484 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed6f777bba2efed5f759348895e3133f 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-22T15:22:23,484 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x45b55c24 to 127.0.0.1:52970 2024-11-22T15:22:23,484 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:22:23,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=A 2024-11-22T15:22:23,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:23,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=B 2024-11-22T15:22:23,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:23,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=C 2024-11-22T15:22:23,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:23,492 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0e52b42a to 127.0.0.1:52970 2024-11-22T15:22:23,492 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:22:23,500 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/0efb016a736641c3a082b41971777fc8 is 50, key is test_row_0/A:col10/1732288942851/Put/seqid=0 2024-11-22T15:22:23,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741976_1152 (size=12301) 2024-11-22T15:22:23,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-22T15:22:23,689 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 32 completed 2024-11-22T15:22:23,909 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=649 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/0efb016a736641c3a082b41971777fc8 2024-11-22T15:22:23,924 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/ed47153b559641f4ac023153c68e8859 is 50, key is test_row_0/B:col10/1732288942851/Put/seqid=0 2024-11-22T15:22:23,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741977_1153 (size=12301) 2024-11-22T15:22:24,332 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=649 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/ed47153b559641f4ac023153c68e8859 2024-11-22T15:22:24,344 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/0f016afd41bd4f00a68b0ca6ce0b7eda is 50, key is test_row_0/C:col10/1732288942851/Put/seqid=0 2024-11-22T15:22:24,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741978_1154 (size=12301) 2024-11-22T15:22:24,749 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=649 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/0f016afd41bd4f00a68b0ca6ce0b7eda 2024-11-22T15:22:24,754 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/0efb016a736641c3a082b41971777fc8 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/0efb016a736641c3a082b41971777fc8 2024-11-22T15:22:24,762 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/0efb016a736641c3a082b41971777fc8, entries=150, sequenceid=649, filesize=12.0 K 2024-11-22T15:22:24,763 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/ed47153b559641f4ac023153c68e8859 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/ed47153b559641f4ac023153c68e8859 2024-11-22T15:22:24,767 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/ed47153b559641f4ac023153c68e8859, entries=150, sequenceid=649, filesize=12.0 K 2024-11-22T15:22:24,768 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/0f016afd41bd4f00a68b0ca6ce0b7eda as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/0f016afd41bd4f00a68b0ca6ce0b7eda 2024-11-22T15:22:24,773 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/0f016afd41bd4f00a68b0ca6ce0b7eda, entries=150, sequenceid=649, filesize=12.0 K 2024-11-22T15:22:24,773 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=6.71 KB/6870 for ed6f777bba2efed5f759348895e3133f in 1289ms, sequenceid=649, compaction requested=true 2024-11-22T15:22:24,773 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:24,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed6f777bba2efed5f759348895e3133f:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:22:24,774 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T15:22:24,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:24,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed6f777bba2efed5f759348895e3133f:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:22:24,774 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T15:22:24,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:24,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed6f777bba2efed5f759348895e3133f:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:22:24,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:22:24,775 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 55548 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T15:22:24,775 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): ed6f777bba2efed5f759348895e3133f/A is initiating minor 
compaction (all files) 2024-11-22T15:22:24,775 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed6f777bba2efed5f759348895e3133f/A in TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:24,775 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50668 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T15:22:24,775 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/47c7acd3fb2640538251548771f2935c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/4368385d64b4427193a9ac28631b4bfc, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/16dc56d2e9d74504b4127bbf4cabe00e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/0efb016a736641c3a082b41971777fc8] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp, totalSize=54.2 K 2024-11-22T15:22:24,775 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): ed6f777bba2efed5f759348895e3133f/B is initiating minor compaction (all files) 2024-11-22T15:22:24,775 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed6f777bba2efed5f759348895e3133f/B in TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
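The entries above show the memstore flush for stores A, B and C being committed and the CompactSplit threads picking the resulting files up for minor compaction. The same flush and compaction requests can also be issued explicitly through the HBase client Admin API. A minimal sketch follows; it assumes an HBase 2.x client with a reachable cluster configured via hbase-site.xml, and the class name FlushAndCompactExample is made up for the example:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompactExample {
  public static void main(String[] args) throws IOException {
    // Reads hbase-site.xml / hbase-default.xml from the classpath.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Ask the region servers to flush every memstore of the table to new HFiles,
      // the same work MemStoreFlusher.0 performs automatically in the log above.
      admin.flush(table);
      // Queue a compaction request; the region server schedules it on its
      // short/long compaction pools, as the CompactSplit entries show.
      admin.compact(table);
    }
  }
}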
2024-11-22T15:22:24,775 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/6a6131ebc903492baae8d99d1b3d6249, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/7781856a1c4249e58ec492299b5243a2, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/7d7a06f8dfa943dba707ccdae9f01892, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/ed47153b559641f4ac023153c68e8859] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp, totalSize=49.5 K 2024-11-22T15:22:24,776 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 47c7acd3fb2640538251548771f2935c, keycount=150, bloomtype=ROW, size=13.4 K, encoding=NONE, compression=NONE, seqNum=586, earliestPutTs=1732288941030 2024-11-22T15:22:24,776 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 6a6131ebc903492baae8d99d1b3d6249, keycount=150, bloomtype=ROW, size=13.4 K, encoding=NONE, compression=NONE, seqNum=586, earliestPutTs=1732288941030 2024-11-22T15:22:24,776 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4368385d64b4427193a9ac28631b4bfc, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=609, earliestPutTs=1732288941727 2024-11-22T15:22:24,776 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 7781856a1c4249e58ec492299b5243a2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=609, earliestPutTs=1732288941727 2024-11-22T15:22:24,776 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 16dc56d2e9d74504b4127bbf4cabe00e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=624, earliestPutTs=1732288942396 2024-11-22T15:22:24,776 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 7d7a06f8dfa943dba707ccdae9f01892, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=624, earliestPutTs=1732288942396 2024-11-22T15:22:24,776 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0efb016a736641c3a082b41971777fc8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=649, earliestPutTs=1732288942847 2024-11-22T15:22:24,777 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting ed47153b559641f4ac023153c68e8859, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=649, earliestPutTs=1732288942847 2024-11-22T15:22:24,804 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed6f777bba2efed5f759348895e3133f#A#compaction#140 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:24,805 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/04a1a4ac25f04cc6a4304c18e5e411fc is 50, key is test_row_0/A:col10/1732288942851/Put/seqid=0 2024-11-22T15:22:24,809 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed6f777bba2efed5f759348895e3133f#B#compaction#141 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:24,809 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/6bf1786648364787bb910bf93040484d is 50, key is test_row_0/B:col10/1732288942851/Put/seqid=0 2024-11-22T15:22:24,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741979_1155 (size=13901) 2024-11-22T15:22:24,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741980_1156 (size=13901) 2024-11-22T15:22:25,232 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/04a1a4ac25f04cc6a4304c18e5e411fc as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/04a1a4ac25f04cc6a4304c18e5e411fc 2024-11-22T15:22:25,236 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/6bf1786648364787bb910bf93040484d as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/6bf1786648364787bb910bf93040484d 2024-11-22T15:22:25,240 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ed6f777bba2efed5f759348895e3133f/A of ed6f777bba2efed5f759348895e3133f into 04a1a4ac25f04cc6a4304c18e5e411fc(size=13.6 K), total size for store is 13.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
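The file selection logged above comes from ExploringCompactionPolicy, whose candidate sets are bounded by the hbase.hstore.compaction.min and hbase.hstore.compaction.max file counts and scored against hbase.hstore.compaction.ratio. These are server-side properties that normally belong in the region servers' hbase-site.xml; the sketch below only shows the keys and their shipped defaults on a client-side Configuration, as an illustration rather than a recommended way to set them:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum and maximum number of store files eligible for one minor compaction.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Size ratio used by ExploringCompactionPolicy when scoring candidate file sets.
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1)
        + ", compaction.max = " + conf.getInt("hbase.hstore.compaction.max", -1));
  }
}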
2024-11-22T15:22:25,240 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:25,240 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., storeName=ed6f777bba2efed5f759348895e3133f/A, priority=12, startTime=1732288944774; duration=0sec 2024-11-22T15:22:25,240 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:22:25,240 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed6f777bba2efed5f759348895e3133f:A 2024-11-22T15:22:25,240 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T15:22:25,245 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50634 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T15:22:25,245 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): ed6f777bba2efed5f759348895e3133f/C is initiating minor compaction (all files) 2024-11-22T15:22:25,246 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed6f777bba2efed5f759348895e3133f/C in TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:25,246 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/dbed00b94a0243ffbfc124b4e3feeb2e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/a315f0b7cdee4c0faa5d66ae3fc3acc0, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/1e265f9f52e943ac82098e1a791c9eb9, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/0f016afd41bd4f00a68b0ca6ce0b7eda] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp, totalSize=49.4 K 2024-11-22T15:22:25,247 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting dbed00b94a0243ffbfc124b4e3feeb2e, keycount=150, bloomtype=ROW, size=13.4 K, encoding=NONE, compression=NONE, seqNum=586, earliestPutTs=1732288941030 2024-11-22T15:22:25,247 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ed6f777bba2efed5f759348895e3133f/B of ed6f777bba2efed5f759348895e3133f into 6bf1786648364787bb910bf93040484d(size=13.6 K), total size for store is 13.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:22:25,247 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:25,247 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., storeName=ed6f777bba2efed5f759348895e3133f/B, priority=12, startTime=1732288944774; duration=0sec 2024-11-22T15:22:25,247 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting a315f0b7cdee4c0faa5d66ae3fc3acc0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=609, earliestPutTs=1732288941727 2024-11-22T15:22:25,248 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1e265f9f52e943ac82098e1a791c9eb9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=624, earliestPutTs=1732288942396 2024-11-22T15:22:25,248 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:25,248 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed6f777bba2efed5f759348895e3133f:B 2024-11-22T15:22:25,248 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0f016afd41bd4f00a68b0ca6ce0b7eda, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=649, earliestPutTs=1732288942847 2024-11-22T15:22:25,261 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed6f777bba2efed5f759348895e3133f#C#compaction#142 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:25,261 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/cca7588b8365465b8ad459242d63db8c is 50, key is test_row_0/C:col10/1732288942851/Put/seqid=0 2024-11-22T15:22:25,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741981_1157 (size=13867) 2024-11-22T15:22:25,673 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/cca7588b8365465b8ad459242d63db8c as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/cca7588b8365465b8ad459242d63db8c 2024-11-22T15:22:25,680 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ed6f777bba2efed5f759348895e3133f/C of ed6f777bba2efed5f759348895e3133f into cca7588b8365465b8ad459242d63db8c(size=13.5 K), total size for store is 13.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
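The compactions above rewrite four ~12 K files per store into a single file without interrupting the writers, readers and scanners whose totals are reported just below. The property those readers verify is HBase's row-level atomicity: a Put that touches several column families is applied as a unit, so a concurrent read must never observe a mix of old and new values within one row. The sketch below illustrates that invariant with the plain client API; it is not the AcidGuaranteesTestTool's own code, and the class name RowAtomicityCheck is made up for the example:

import java.io.IOException;
import java.util.Arrays;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RowAtomicityCheck {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    byte[] row = Bytes.toBytes("test_row_0");
    byte[] value = Bytes.toBytes(String.valueOf(System.currentTimeMillis()));
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // One Put spanning families A, B and C; HBase applies it atomically per row.
      Put put = new Put(row);
      for (String family : new String[] {"A", "B", "C"}) {
        put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"), value);
      }
      table.put(put);

      // A reader must never see a torn row, i.e. two different values in one row.
      Result result = table.get(new Get(row));
      byte[] first = null;
      for (Cell cell : result.rawCells()) {
        byte[] v = CellUtil.cloneValue(cell);
        if (first == null) {
          first = v;
        } else if (!Arrays.equals(first, v)) {
          throw new IllegalStateException("Torn row: two different values in one row");
        }
      }
    }
  }
}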
2024-11-22T15:22:25,680 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:25,680 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f., storeName=ed6f777bba2efed5f759348895e3133f/C, priority=12, startTime=1732288944774; duration=0sec 2024-11-22T15:22:25,680 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:25,680 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed6f777bba2efed5f759348895e3133f:C 2024-11-22T15:22:32,122 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-22T15:22:32,123 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46968, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-22T15:22:32,948 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1f2091cc to 127.0.0.1:52970 2024-11-22T15:22:32,948 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:22:32,969 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x18cb251d to 127.0.0.1:52970 2024-11-22T15:22:32,969 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:22:32,984 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x09bd0964 to 127.0.0.1:52970 2024-11-22T15:22:32,984 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:22:32,985 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-22T15:22:32,985 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 85 2024-11-22T15:22:32,985 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 84 2024-11-22T15:22:32,985 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 86 2024-11-22T15:22:32,985 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 133 2024-11-22T15:22:32,985 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 124 2024-11-22T15:22:32,986 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-22T15:22:32,986 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3136 2024-11-22T15:22:32,986 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3038 2024-11-22T15:22:32,986 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-22T15:22:32,986 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1407 2024-11-22T15:22:32,986 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4221 rows 2024-11-22T15:22:32,986 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1406 2024-11-22T15:22:32,986 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4218 rows 2024-11-22T15:22:32,986 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-22T15:22:32,986 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6fcb5f29 to 127.0.0.1:52970 2024-11-22T15:22:32,986 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:22:32,993 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-22T15:22:32,996 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-22T15:22:33,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=34, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-22T15:22:33,002 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732288953002"}]},"ts":"1732288953002"} 2024-11-22T15:22:33,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-22T15:22:33,003 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-22T15:22:33,012 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-22T15:22:33,014 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-22T15:22:33,018 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ed6f777bba2efed5f759348895e3133f, UNASSIGN}] 2024-11-22T15:22:33,018 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure 
table=TestAcidGuarantees, region=ed6f777bba2efed5f759348895e3133f, UNASSIGN 2024-11-22T15:22:33,019 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=ed6f777bba2efed5f759348895e3133f, regionState=CLOSING, regionLocation=77927f992d0b,36033,1732288915809 2024-11-22T15:22:33,020 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-22T15:22:33,021 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=37, ppid=36, state=RUNNABLE; CloseRegionProcedure ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809}] 2024-11-22T15:22:33,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-22T15:22:33,176 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:33,177 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] handler.UnassignRegionHandler(124): Close ed6f777bba2efed5f759348895e3133f 2024-11-22T15:22:33,177 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-22T15:22:33,178 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1681): Closing ed6f777bba2efed5f759348895e3133f, disabling compactions & flushes 2024-11-22T15:22:33,178 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:33,178 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:33,178 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. after waiting 0 ms 2024-11-22T15:22:33,178 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 
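At this point the master has stored DisableTableProcedure pid=34 with its CloseTableRegionsProcedure and TransitRegionStateProcedure children, and the region server begins closing ed6f777bba2efed5f759348895e3133f. The client-side call that starts this chain is Admin.disableTable. A minimal sketch, again assuming cluster settings come from hbase-site.xml on the classpath:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Guard against TableNotEnabledException if the table is already disabled.
      if (admin.isTableEnabled(table)) {
        // Submits a DisableTableProcedure on the master; the region servers then
        // close each region, flushing any remaining memstore data first, as above.
        admin.disableTable(table);
      }
      System.out.println("disabled = " + admin.isTableDisabled(table));
    }
  }
}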
2024-11-22T15:22:33,178 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(2837): Flushing ed6f777bba2efed5f759348895e3133f 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-22T15:22:33,178 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=A 2024-11-22T15:22:33,178 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:33,178 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=B 2024-11-22T15:22:33,178 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:33,178 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed6f777bba2efed5f759348895e3133f, store=C 2024-11-22T15:22:33,178 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:33,183 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/7d1e3ed5f8744a1e98a34591aeef4372 is 50, key is test_row_0/A:col10/1732288943491/Put/seqid=0 2024-11-22T15:22:33,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741982_1158 (size=12301) 2024-11-22T15:22:33,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-22T15:22:33,588 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=659 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/7d1e3ed5f8744a1e98a34591aeef4372 2024-11-22T15:22:33,596 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/2eebc6ece97741cb8dae35a87087a2cc is 50, key is test_row_0/B:col10/1732288943491/Put/seqid=0 2024-11-22T15:22:33,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741983_1159 (size=12301) 2024-11-22T15:22:33,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-22T15:22:34,002 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 
{event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=659 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/2eebc6ece97741cb8dae35a87087a2cc 2024-11-22T15:22:34,010 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/3c1b381a753a484d9a69e28a6841e957 is 50, key is test_row_0/C:col10/1732288943491/Put/seqid=0 2024-11-22T15:22:34,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741984_1160 (size=12301) 2024-11-22T15:22:34,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-22T15:22:34,415 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=659 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/3c1b381a753a484d9a69e28a6841e957 2024-11-22T15:22:34,420 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/A/7d1e3ed5f8744a1e98a34591aeef4372 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/7d1e3ed5f8744a1e98a34591aeef4372 2024-11-22T15:22:34,425 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/7d1e3ed5f8744a1e98a34591aeef4372, entries=150, sequenceid=659, filesize=12.0 K 2024-11-22T15:22:34,426 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/B/2eebc6ece97741cb8dae35a87087a2cc as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/2eebc6ece97741cb8dae35a87087a2cc 2024-11-22T15:22:34,431 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/2eebc6ece97741cb8dae35a87087a2cc, entries=150, sequenceid=659, filesize=12.0 K 2024-11-22T15:22:34,432 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/.tmp/C/3c1b381a753a484d9a69e28a6841e957 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/3c1b381a753a484d9a69e28a6841e957 2024-11-22T15:22:34,438 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/3c1b381a753a484d9a69e28a6841e957, entries=150, sequenceid=659, filesize=12.0 K 2024-11-22T15:22:34,439 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for ed6f777bba2efed5f759348895e3133f in 1261ms, sequenceid=659, compaction requested=false 2024-11-22T15:22:34,440 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/811a098b918240498f17800d23a78de0, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/076afb52330547c49cd0cb45be340f06, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/bb1e942609ce41c2be9090fc1a272a00, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/18fb3ebed6cd41448f2376e3ba95470f, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/a98afbfdf1a248f1859f8b8dd0dc35e8, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/1bc09a5bb0d344fda531ef7c79aaf625, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/ac3b33115bed4141b4334aa086e848ab, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/78242f17e7ba403aa1413ea437d65090, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/0e647622ef114b53b2dfff88600bfd63, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/297bb89b92a94260acabf49c53a578bb, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/a057db3163ea451e96e6a3911c58f025, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/66ef8bb51deb4eb8814d4068d4440543, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/ebaf0a80a5834c82a08fece47eec3c3f, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/70af24b7693f45f689e559b522efbc11, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/3ce7428c7e61462d8f474f64b4aac111, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/aa6684aea1924baa9abd37bded5897f1, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/d337a0c7c1c54cdea4d4e5db413831b4, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/187691589fc34abca095072a71171138, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/9589b79de635430792198f94df449008, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/27e0a0d737d94b06b78543252363df5a, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/cd9f875a56604e0faa23ca9890f17d42, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/26a518ac9f8f417080875fa31d9e8ab4, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/e1a8c41c332847b99f82c46d7eabc540, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/8297579f02ea4eeb8255f13fe7145a62, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/d1da24f0322c4ed49c8ffbd7344c3874, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/57e764fff3f34308bafe721016b79b7a, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/6274db811c1a4118a78d06f141b9d6ab, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/f4cb6e13c9284a6ba7db2fc7ec2dd340, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/ee2d60df013b48a7b157bc18d9eac9ed, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/10adab8e7c90487ab0e169d132fe9623, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/811d4ad9362f43eb8ff7dcd1b6a8a07d, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/18a95b04998e4cd9a2defab662b25301, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/dc67e058c89446d29d6a7bfb00bec074, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/45b1c98712304ddf8ed1bcad2d11309e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/02971d1af35349c1a7c0a8b6c152449e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/47f6fe33dadc4437b1c765ac4590a871, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/93daaf6a55ef4d0b8ae496ac2f6b4bee, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/0af260cbebbe41ccab8cb007f9ccfb62, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/2ea0e878ac3843b7845f3fb20f9a942e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/eb01c609eb594abfb5e8a3fad1c0a067, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/03278d8da614424e98689f6fb81cf66a, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/ab7d410000ec4154ad60e5a60daa5441, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/0cf6963eec10474fafd571324ef84494, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/47c7acd3fb2640538251548771f2935c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/4368385d64b4427193a9ac28631b4bfc, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/16dc56d2e9d74504b4127bbf4cabe00e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/0efb016a736641c3a082b41971777fc8] to archive 2024-11-22T15:22:34,443 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
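Closing the store does not delete the compacted-away HFiles outright; HFileArchiver moves them under the cluster's archive directory, preserving the data/<namespace>/<table>/<region>/<family> layout, as the entries below record file by file. The archived files can be inspected with the standard Hadoop FileSystem API. In the sketch below, the path is the archive location from this particular run and is purely illustrative:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class ListArchivedHFiles {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Archive directory for store A of this region, taken from the log above.
    Path archivedStore = new Path(
        "hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/"
            + "archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A");
    FileSystem fs = archivedStore.getFileSystem(conf);
    // Print every archived HFile and its size.
    for (FileStatus status : fs.listStatus(archivedStore)) {
      System.out.println(status.getPath().getName() + " (" + status.getLen() + " bytes)");
    }
  }
}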
2024-11-22T15:22:34,450 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/811a098b918240498f17800d23a78de0 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/811a098b918240498f17800d23a78de0 2024-11-22T15:22:34,453 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/076afb52330547c49cd0cb45be340f06 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/076afb52330547c49cd0cb45be340f06 2024-11-22T15:22:34,457 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/bb1e942609ce41c2be9090fc1a272a00 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/bb1e942609ce41c2be9090fc1a272a00 2024-11-22T15:22:34,459 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/18fb3ebed6cd41448f2376e3ba95470f to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/18fb3ebed6cd41448f2376e3ba95470f 2024-11-22T15:22:34,461 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/a98afbfdf1a248f1859f8b8dd0dc35e8 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/a98afbfdf1a248f1859f8b8dd0dc35e8 2024-11-22T15:22:34,464 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/1bc09a5bb0d344fda531ef7c79aaf625 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/1bc09a5bb0d344fda531ef7c79aaf625 2024-11-22T15:22:34,466 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/ac3b33115bed4141b4334aa086e848ab to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/ac3b33115bed4141b4334aa086e848ab 2024-11-22T15:22:34,467 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/78242f17e7ba403aa1413ea437d65090 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/78242f17e7ba403aa1413ea437d65090 2024-11-22T15:22:34,469 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/0e647622ef114b53b2dfff88600bfd63 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/0e647622ef114b53b2dfff88600bfd63 2024-11-22T15:22:34,470 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/297bb89b92a94260acabf49c53a578bb to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/297bb89b92a94260acabf49c53a578bb 2024-11-22T15:22:34,471 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/a057db3163ea451e96e6a3911c58f025 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/a057db3163ea451e96e6a3911c58f025 2024-11-22T15:22:34,473 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/66ef8bb51deb4eb8814d4068d4440543 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/66ef8bb51deb4eb8814d4068d4440543 2024-11-22T15:22:34,474 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/ebaf0a80a5834c82a08fece47eec3c3f to 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/ebaf0a80a5834c82a08fece47eec3c3f 2024-11-22T15:22:34,475 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/70af24b7693f45f689e559b522efbc11 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/70af24b7693f45f689e559b522efbc11 2024-11-22T15:22:34,477 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/3ce7428c7e61462d8f474f64b4aac111 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/3ce7428c7e61462d8f474f64b4aac111 2024-11-22T15:22:34,478 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/aa6684aea1924baa9abd37bded5897f1 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/aa6684aea1924baa9abd37bded5897f1 2024-11-22T15:22:34,479 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/d337a0c7c1c54cdea4d4e5db413831b4 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/d337a0c7c1c54cdea4d4e5db413831b4 2024-11-22T15:22:34,480 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/187691589fc34abca095072a71171138 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/187691589fc34abca095072a71171138 2024-11-22T15:22:34,482 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/9589b79de635430792198f94df449008 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/9589b79de635430792198f94df449008 2024-11-22T15:22:34,483 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/27e0a0d737d94b06b78543252363df5a to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/27e0a0d737d94b06b78543252363df5a 2024-11-22T15:22:34,485 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/cd9f875a56604e0faa23ca9890f17d42 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/cd9f875a56604e0faa23ca9890f17d42 2024-11-22T15:22:34,486 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/26a518ac9f8f417080875fa31d9e8ab4 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/26a518ac9f8f417080875fa31d9e8ab4 2024-11-22T15:22:34,487 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/e1a8c41c332847b99f82c46d7eabc540 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/e1a8c41c332847b99f82c46d7eabc540 2024-11-22T15:22:34,488 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/8297579f02ea4eeb8255f13fe7145a62 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/8297579f02ea4eeb8255f13fe7145a62 2024-11-22T15:22:34,490 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/d1da24f0322c4ed49c8ffbd7344c3874 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/d1da24f0322c4ed49c8ffbd7344c3874 2024-11-22T15:22:34,491 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/57e764fff3f34308bafe721016b79b7a to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/57e764fff3f34308bafe721016b79b7a 2024-11-22T15:22:34,493 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/6274db811c1a4118a78d06f141b9d6ab to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/6274db811c1a4118a78d06f141b9d6ab 2024-11-22T15:22:34,494 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/f4cb6e13c9284a6ba7db2fc7ec2dd340 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/f4cb6e13c9284a6ba7db2fc7ec2dd340 2024-11-22T15:22:34,495 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/ee2d60df013b48a7b157bc18d9eac9ed to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/ee2d60df013b48a7b157bc18d9eac9ed 2024-11-22T15:22:34,496 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/10adab8e7c90487ab0e169d132fe9623 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/10adab8e7c90487ab0e169d132fe9623 2024-11-22T15:22:34,497 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/811d4ad9362f43eb8ff7dcd1b6a8a07d to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/811d4ad9362f43eb8ff7dcd1b6a8a07d 2024-11-22T15:22:34,499 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/18a95b04998e4cd9a2defab662b25301 to 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/18a95b04998e4cd9a2defab662b25301 2024-11-22T15:22:34,500 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/dc67e058c89446d29d6a7bfb00bec074 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/dc67e058c89446d29d6a7bfb00bec074 2024-11-22T15:22:34,501 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/45b1c98712304ddf8ed1bcad2d11309e to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/45b1c98712304ddf8ed1bcad2d11309e 2024-11-22T15:22:34,502 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/02971d1af35349c1a7c0a8b6c152449e to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/02971d1af35349c1a7c0a8b6c152449e 2024-11-22T15:22:34,504 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/47f6fe33dadc4437b1c765ac4590a871 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/47f6fe33dadc4437b1c765ac4590a871 2024-11-22T15:22:34,506 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/93daaf6a55ef4d0b8ae496ac2f6b4bee to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/93daaf6a55ef4d0b8ae496ac2f6b4bee 2024-11-22T15:22:34,507 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/0af260cbebbe41ccab8cb007f9ccfb62 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/0af260cbebbe41ccab8cb007f9ccfb62 2024-11-22T15:22:34,509 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/2ea0e878ac3843b7845f3fb20f9a942e to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/2ea0e878ac3843b7845f3fb20f9a942e 2024-11-22T15:22:34,510 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/eb01c609eb594abfb5e8a3fad1c0a067 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/eb01c609eb594abfb5e8a3fad1c0a067 2024-11-22T15:22:34,511 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/03278d8da614424e98689f6fb81cf66a to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/03278d8da614424e98689f6fb81cf66a 2024-11-22T15:22:34,513 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/ab7d410000ec4154ad60e5a60daa5441 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/ab7d410000ec4154ad60e5a60daa5441 2024-11-22T15:22:34,514 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/0cf6963eec10474fafd571324ef84494 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/0cf6963eec10474fafd571324ef84494 2024-11-22T15:22:34,516 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/47c7acd3fb2640538251548771f2935c to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/47c7acd3fb2640538251548771f2935c 2024-11-22T15:22:34,517 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/4368385d64b4427193a9ac28631b4bfc to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/4368385d64b4427193a9ac28631b4bfc 2024-11-22T15:22:34,519 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/16dc56d2e9d74504b4127bbf4cabe00e to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/16dc56d2e9d74504b4127bbf4cabe00e 2024-11-22T15:22:34,520 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/0efb016a736641c3a082b41971777fc8 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/0efb016a736641c3a082b41971777fc8 2024-11-22T15:22:34,536 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/3fa8ab96d7a94f70a547b2b30b426dc7, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/2a456b2630024494b6d05ab7c6cafdc6, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/d48ab69f07d643389ddaa337fa3ffd6e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/4de3cda9762a41ed9ead25798d074956, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/125485b1c5d6464b8b84e84b77acfd08, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/5b33c56cc1e04168a4295558e549eee4, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/270e3550bbce42cea912151f3366e87e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/8d7463c00b534b2a8090cdf358c82202, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/93bd2e34f615459d936779c178f1d802, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/116957e902b747ccabcbaf9230ca3004, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/283aa55ae06843ddae0ccd096edf15f8, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/df711b9f31c34b2382ee8f56095dda26, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/769b27334df34d588cacc54ad33eaeb6, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/2d499b2ad3dd45a39a38c5fdfc50be55, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/192bd902f93e4095a69493335c9fb492, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/6c0c4b9276004d0e91244bf4774cd00f, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/505bde60d08f43a6a747b714b2827dba, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/c0d4a9d1317c4844afde506b9d6ba840, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/0d3346771dbb454a9db1fd8ad8bd1366, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/15570d43181d47cfb4b99bac4b23d602, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/3ea0de07dd5f42c9bf4435214445f35a, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/c75dd94ec3e84e7bbb10ad008eadcc24, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/ebcf409d10e246008c47a64fb8ffe19d, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/ac26091912774439814fe8bad58d0fc1, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/156f9714ca2b4ad084cd7dd05ba26242, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/ff7c2010c34d430d85ba369ada86341f, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/6f937da304cf49b38ead0eaafb9650f1, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/a3d27f808b644e209a19c25ef84c1000, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/666a329fca07492c9d5871685b2b2589, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/33cb133859654c199206a345938c04e2, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/eeb965a4e2694a459762d877fe40a9e3, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/3a825cf99a124868b4dacd70433f9020, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/cc7a273680594f9cbf2baf77a67d235a, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/0acb1538d08e4d1abedece939d768789, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/777c6d4e2f7b437bb85de796bc164ab9, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/8637015f862440d48e932665beac970c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/ca847b60e4564753a1a5b42f909384de, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/263fab70748b4c5a93a0482c7a2f2144, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/b432a12ef1fd4988a6301a68d87ee5a0, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/e996ccd447674810bacfae68c60b1c72, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/87ab3d53b8e544d0a09329640466e7a6, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/4bfee0ad1fa24554a6a10c4915589ebe, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/6a6131ebc903492baae8d99d1b3d6249, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/199f332ceb1c495ba8d77abc5a62b7a3, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/7781856a1c4249e58ec492299b5243a2, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/7d7a06f8dfa943dba707ccdae9f01892, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/ed47153b559641f4ac023153c68e8859] to archive 2024-11-22T15:22:34,537 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
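Note on the lines above and below: each "Archived from FileableStoreFile, <src> to <dst>" entry shows the same mapping, where a store file under <root>/data/default/TestAcidGuarantees/<region>/<family>/ is mirrored to the identical relative path under <root>/archive/. The following is a minimal, hypothetical Java sketch of that path mapping only, written to make the log easier to read; the class and method names are assumptions and this is not HBase's HFileArchiver implementation.

import java.net.URI;
import java.nio.file.Path;
import java.nio.file.Paths;

public final class ArchivePathSketch {

    // Mirrors a store file's path from <root>/data/... to <root>/archive/data/...,
    // which is the mapping visible in every "Archived from ... to ..." line above.
    // Sketch under assumptions, not the actual HFileArchiver code.
    static URI toArchiveLocation(URI storeFile, URI rootDir) {
        Path relative = Paths.get(rootDir.getPath()).relativize(Paths.get(storeFile.getPath()));
        Path archived = Paths.get(rootDir.getPath(), "archive").resolve(relative);
        return rootDir.resolve(archived.toString());
    }

    public static void main(String[] args) {
        URI root = URI.create("hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/");
        URI src = root.resolve("data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/3fa8ab96d7a94f70a547b2b30b426dc7");
        // Prints the archive location the log reports for this file.
        System.out.println(toArchiveLocation(src, root));
    }
}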
2024-11-22T15:22:34,539 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/3fa8ab96d7a94f70a547b2b30b426dc7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/3fa8ab96d7a94f70a547b2b30b426dc7 2024-11-22T15:22:34,540 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/2a456b2630024494b6d05ab7c6cafdc6 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/2a456b2630024494b6d05ab7c6cafdc6 2024-11-22T15:22:34,541 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/d48ab69f07d643389ddaa337fa3ffd6e to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/d48ab69f07d643389ddaa337fa3ffd6e 2024-11-22T15:22:34,543 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/4de3cda9762a41ed9ead25798d074956 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/4de3cda9762a41ed9ead25798d074956 2024-11-22T15:22:34,544 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/125485b1c5d6464b8b84e84b77acfd08 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/125485b1c5d6464b8b84e84b77acfd08 2024-11-22T15:22:34,545 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/5b33c56cc1e04168a4295558e549eee4 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/5b33c56cc1e04168a4295558e549eee4 2024-11-22T15:22:34,546 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/270e3550bbce42cea912151f3366e87e to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/270e3550bbce42cea912151f3366e87e 2024-11-22T15:22:34,547 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/8d7463c00b534b2a8090cdf358c82202 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/8d7463c00b534b2a8090cdf358c82202 2024-11-22T15:22:34,549 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/93bd2e34f615459d936779c178f1d802 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/93bd2e34f615459d936779c178f1d802 2024-11-22T15:22:34,550 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/116957e902b747ccabcbaf9230ca3004 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/116957e902b747ccabcbaf9230ca3004 2024-11-22T15:22:34,551 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/283aa55ae06843ddae0ccd096edf15f8 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/283aa55ae06843ddae0ccd096edf15f8 2024-11-22T15:22:34,552 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/df711b9f31c34b2382ee8f56095dda26 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/df711b9f31c34b2382ee8f56095dda26 2024-11-22T15:22:34,553 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/769b27334df34d588cacc54ad33eaeb6 to 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/769b27334df34d588cacc54ad33eaeb6 2024-11-22T15:22:34,554 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/2d499b2ad3dd45a39a38c5fdfc50be55 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/2d499b2ad3dd45a39a38c5fdfc50be55 2024-11-22T15:22:34,555 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/192bd902f93e4095a69493335c9fb492 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/192bd902f93e4095a69493335c9fb492 2024-11-22T15:22:34,556 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/6c0c4b9276004d0e91244bf4774cd00f to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/6c0c4b9276004d0e91244bf4774cd00f 2024-11-22T15:22:34,557 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/505bde60d08f43a6a747b714b2827dba to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/505bde60d08f43a6a747b714b2827dba 2024-11-22T15:22:34,559 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/c0d4a9d1317c4844afde506b9d6ba840 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/c0d4a9d1317c4844afde506b9d6ba840 2024-11-22T15:22:34,560 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/0d3346771dbb454a9db1fd8ad8bd1366 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/0d3346771dbb454a9db1fd8ad8bd1366 2024-11-22T15:22:34,561 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/15570d43181d47cfb4b99bac4b23d602 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/15570d43181d47cfb4b99bac4b23d602 2024-11-22T15:22:34,562 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/3ea0de07dd5f42c9bf4435214445f35a to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/3ea0de07dd5f42c9bf4435214445f35a 2024-11-22T15:22:34,563 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/c75dd94ec3e84e7bbb10ad008eadcc24 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/c75dd94ec3e84e7bbb10ad008eadcc24 2024-11-22T15:22:34,564 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/ebcf409d10e246008c47a64fb8ffe19d to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/ebcf409d10e246008c47a64fb8ffe19d 2024-11-22T15:22:34,565 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/ac26091912774439814fe8bad58d0fc1 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/ac26091912774439814fe8bad58d0fc1 2024-11-22T15:22:34,566 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/156f9714ca2b4ad084cd7dd05ba26242 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/156f9714ca2b4ad084cd7dd05ba26242 2024-11-22T15:22:34,567 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/ff7c2010c34d430d85ba369ada86341f to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/ff7c2010c34d430d85ba369ada86341f 2024-11-22T15:22:34,568 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/6f937da304cf49b38ead0eaafb9650f1 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/6f937da304cf49b38ead0eaafb9650f1 2024-11-22T15:22:34,568 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/a3d27f808b644e209a19c25ef84c1000 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/a3d27f808b644e209a19c25ef84c1000 2024-11-22T15:22:34,569 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/666a329fca07492c9d5871685b2b2589 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/666a329fca07492c9d5871685b2b2589 2024-11-22T15:22:34,571 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/33cb133859654c199206a345938c04e2 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/33cb133859654c199206a345938c04e2 2024-11-22T15:22:34,572 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/eeb965a4e2694a459762d877fe40a9e3 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/eeb965a4e2694a459762d877fe40a9e3 2024-11-22T15:22:34,573 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/3a825cf99a124868b4dacd70433f9020 to 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/3a825cf99a124868b4dacd70433f9020 2024-11-22T15:22:34,573 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/cc7a273680594f9cbf2baf77a67d235a to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/cc7a273680594f9cbf2baf77a67d235a 2024-11-22T15:22:34,574 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/0acb1538d08e4d1abedece939d768789 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/0acb1538d08e4d1abedece939d768789 2024-11-22T15:22:34,575 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/777c6d4e2f7b437bb85de796bc164ab9 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/777c6d4e2f7b437bb85de796bc164ab9 2024-11-22T15:22:34,576 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/8637015f862440d48e932665beac970c to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/8637015f862440d48e932665beac970c 2024-11-22T15:22:34,577 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/ca847b60e4564753a1a5b42f909384de to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/ca847b60e4564753a1a5b42f909384de 2024-11-22T15:22:34,579 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/263fab70748b4c5a93a0482c7a2f2144 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/263fab70748b4c5a93a0482c7a2f2144 2024-11-22T15:22:34,579 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/b432a12ef1fd4988a6301a68d87ee5a0 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/b432a12ef1fd4988a6301a68d87ee5a0 2024-11-22T15:22:34,580 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/e996ccd447674810bacfae68c60b1c72 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/e996ccd447674810bacfae68c60b1c72 2024-11-22T15:22:34,582 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/87ab3d53b8e544d0a09329640466e7a6 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/87ab3d53b8e544d0a09329640466e7a6 2024-11-22T15:22:34,583 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/4bfee0ad1fa24554a6a10c4915589ebe to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/4bfee0ad1fa24554a6a10c4915589ebe 2024-11-22T15:22:34,584 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/6a6131ebc903492baae8d99d1b3d6249 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/6a6131ebc903492baae8d99d1b3d6249 2024-11-22T15:22:34,585 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/199f332ceb1c495ba8d77abc5a62b7a3 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/199f332ceb1c495ba8d77abc5a62b7a3 2024-11-22T15:22:34,586 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/7781856a1c4249e58ec492299b5243a2 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/7781856a1c4249e58ec492299b5243a2 2024-11-22T15:22:34,587 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/7d7a06f8dfa943dba707ccdae9f01892 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/7d7a06f8dfa943dba707ccdae9f01892 2024-11-22T15:22:34,588 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/ed47153b559641f4ac023153c68e8859 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/ed47153b559641f4ac023153c68e8859 2024-11-22T15:22:34,590 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/9e4031ecd26b41a0a4539aae86f0e5ad, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/77acae05d7324f25b6bfaf7843aba5a1, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/70271f10a8d043bfb562d624cc04e203, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/18d10c63c73944968e172c818f755f9a, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/42d351a76a82451583e10d18f9885071, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/17ccf7de0d2c4ef293827c498d833d8b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/7981649e97ab46f592058d57a07b7fef, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/0a9597a5f64e4794b41cae09cef188f5, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/2ffdfabab6aa4721b5780142ed8aa825, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/31489670f329482e9900072930dda420, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/75e977984a3643f587d021dd04fc6fb6, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/f598c33e5c35448aa383b1f8396a2e3f, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/95a1b12cce754aef97c916275114d38d, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/35abebe13b824482bc058ac2dba5884e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/98ca54e3390b4d45ad31a131c6a26111, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/b16ae5ecc0994ee69d2bfc871249b24c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/360a1816a8ae4e80b44bc4ae74741d7d, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/fe9e5cd44320474ca492cb4b05845a5c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/7dcfacb44ab04a958620c88fd35406d4, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/6387d87b450a4e28a52ae330ac178dd7, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/561a87ed7c1943d796e5a5b04d163cad, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/89286c1fabbf4e7cb7f0f5511e90bd3b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/6d0358d4f9be4a679654a2fd4f1eae53, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/eaca8dd26ca547cabdcc38c622f349f4, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/b45c5789ce4446b0a2caa5a862490b8c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/3a702fdb647a4cd79db2cea53f541bfd, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/b9b1df51a5de43e78d3249ca187853de, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/344b1e183d14413980af448583ebe3bc, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/d01783af10144f768279d0909527b953, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/1df02971e201472dade34f1e8fbe710b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/df5a33d6dccc4ebd929e38da34f21d49, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/5f9cfc5cefab442dab88d0cd7565764f, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/66507102558c4d2a8bfe2a18da0fe789, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/9f7d13aa597a4dc1b193b4a66bd21909, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/a421cd99cf03463c83d714c526ba5947, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/3f97ed1d2eca435abe80af72a27d8c04, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/ca730382bd564cd490a7ab5a116475b5, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/9e700e290e864229878ddc7bcd9c5c08, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/11fb54a59af2484e860050b1bec6c033, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/e0a2286047864900a64e781a448c762c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/15a65b1a524e4851819cd00d7ed59faa, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/dbed00b94a0243ffbfc124b4e3feeb2e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/6f7381fe331c4d0c82b31a34fdea6b02, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/a315f0b7cdee4c0faa5d66ae3fc3acc0, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/1e265f9f52e943ac82098e1a791c9eb9, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/0f016afd41bd4f00a68b0ca6ce0b7eda] to archive 2024-11-22T15:22:34,591 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
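Each column family follows the same two-step pattern seen here for C: regionserver.HStore(2316) first logs the full batch it is handing off ("Moving the files [...] to archive"), then backup.HFileArchiver(360) logs "Archiving compacted files." and emits one "Archived from FileableStoreFile" line per file. A quick consistency check over a capture like this is to compare the number of files queued in the batch lists against the number of per-file archive lines. The sketch below does that; the log file path is a hypothetical argument and the regexes are assumptions about this capture format, not anything provided by HBase.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class ArchiveLogCheck {
    public static void main(String[] args) throws IOException {
        // Read the saved console capture (path is an assumption for this sketch).
        String log = Files.readString(Paths.get(args.length > 0 ? args[0] : "test.log"));

        // Count files listed inside every "Moving the files [...] to archive" batch.
        long queued = 0;
        Matcher batches = Pattern.compile("Moving the files \\[(.*?)\\] to archive", Pattern.DOTALL).matcher(log);
        while (batches.find()) {
            queued += batches.group(1).split(",").length;
        }

        // Count the per-file archive confirmations.
        long archived = Pattern.compile("Archived from FileableStoreFile").matcher(log).results().count();

        // For a clean run the two counts should match.
        System.out.printf("queued=%d archived=%d%n", queued, archived);
    }
}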
2024-11-22T15:22:34,593 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/9e4031ecd26b41a0a4539aae86f0e5ad to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/9e4031ecd26b41a0a4539aae86f0e5ad 2024-11-22T15:22:34,594 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/77acae05d7324f25b6bfaf7843aba5a1 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/77acae05d7324f25b6bfaf7843aba5a1 2024-11-22T15:22:34,595 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/70271f10a8d043bfb562d624cc04e203 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/70271f10a8d043bfb562d624cc04e203 2024-11-22T15:22:34,596 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/18d10c63c73944968e172c818f755f9a to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/18d10c63c73944968e172c818f755f9a 2024-11-22T15:22:34,598 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/42d351a76a82451583e10d18f9885071 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/42d351a76a82451583e10d18f9885071 2024-11-22T15:22:34,599 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/17ccf7de0d2c4ef293827c498d833d8b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/17ccf7de0d2c4ef293827c498d833d8b 2024-11-22T15:22:34,600 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/7981649e97ab46f592058d57a07b7fef to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/7981649e97ab46f592058d57a07b7fef 2024-11-22T15:22:34,602 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/0a9597a5f64e4794b41cae09cef188f5 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/0a9597a5f64e4794b41cae09cef188f5 2024-11-22T15:22:34,603 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/2ffdfabab6aa4721b5780142ed8aa825 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/2ffdfabab6aa4721b5780142ed8aa825 2024-11-22T15:22:34,605 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/31489670f329482e9900072930dda420 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/31489670f329482e9900072930dda420 2024-11-22T15:22:34,606 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/75e977984a3643f587d021dd04fc6fb6 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/75e977984a3643f587d021dd04fc6fb6 2024-11-22T15:22:34,608 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/f598c33e5c35448aa383b1f8396a2e3f to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/f598c33e5c35448aa383b1f8396a2e3f 2024-11-22T15:22:34,609 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/95a1b12cce754aef97c916275114d38d to 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/95a1b12cce754aef97c916275114d38d 2024-11-22T15:22:34,610 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/35abebe13b824482bc058ac2dba5884e to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/35abebe13b824482bc058ac2dba5884e 2024-11-22T15:22:34,611 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/98ca54e3390b4d45ad31a131c6a26111 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/98ca54e3390b4d45ad31a131c6a26111 2024-11-22T15:22:34,612 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/b16ae5ecc0994ee69d2bfc871249b24c to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/b16ae5ecc0994ee69d2bfc871249b24c 2024-11-22T15:22:34,613 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/360a1816a8ae4e80b44bc4ae74741d7d to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/360a1816a8ae4e80b44bc4ae74741d7d 2024-11-22T15:22:34,614 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/fe9e5cd44320474ca492cb4b05845a5c to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/fe9e5cd44320474ca492cb4b05845a5c 2024-11-22T15:22:34,615 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/7dcfacb44ab04a958620c88fd35406d4 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/7dcfacb44ab04a958620c88fd35406d4 2024-11-22T15:22:34,616 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/6387d87b450a4e28a52ae330ac178dd7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/6387d87b450a4e28a52ae330ac178dd7 2024-11-22T15:22:34,617 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/561a87ed7c1943d796e5a5b04d163cad to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/561a87ed7c1943d796e5a5b04d163cad 2024-11-22T15:22:34,618 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/89286c1fabbf4e7cb7f0f5511e90bd3b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/89286c1fabbf4e7cb7f0f5511e90bd3b 2024-11-22T15:22:34,619 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/6d0358d4f9be4a679654a2fd4f1eae53 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/6d0358d4f9be4a679654a2fd4f1eae53 2024-11-22T15:22:34,619 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/eaca8dd26ca547cabdcc38c622f349f4 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/eaca8dd26ca547cabdcc38c622f349f4 2024-11-22T15:22:34,620 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/b45c5789ce4446b0a2caa5a862490b8c to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/b45c5789ce4446b0a2caa5a862490b8c 2024-11-22T15:22:34,621 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/3a702fdb647a4cd79db2cea53f541bfd to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/3a702fdb647a4cd79db2cea53f541bfd 2024-11-22T15:22:34,622 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/b9b1df51a5de43e78d3249ca187853de to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/b9b1df51a5de43e78d3249ca187853de 2024-11-22T15:22:34,623 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/344b1e183d14413980af448583ebe3bc to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/344b1e183d14413980af448583ebe3bc 2024-11-22T15:22:34,624 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/d01783af10144f768279d0909527b953 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/d01783af10144f768279d0909527b953 2024-11-22T15:22:34,625 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/1df02971e201472dade34f1e8fbe710b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/1df02971e201472dade34f1e8fbe710b 2024-11-22T15:22:34,626 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/df5a33d6dccc4ebd929e38da34f21d49 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/df5a33d6dccc4ebd929e38da34f21d49 2024-11-22T15:22:34,627 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/5f9cfc5cefab442dab88d0cd7565764f to 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/5f9cfc5cefab442dab88d0cd7565764f 2024-11-22T15:22:34,628 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/66507102558c4d2a8bfe2a18da0fe789 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/66507102558c4d2a8bfe2a18da0fe789 2024-11-22T15:22:34,629 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/9f7d13aa597a4dc1b193b4a66bd21909 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/9f7d13aa597a4dc1b193b4a66bd21909 2024-11-22T15:22:34,630 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/a421cd99cf03463c83d714c526ba5947 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/a421cd99cf03463c83d714c526ba5947 2024-11-22T15:22:34,631 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/3f97ed1d2eca435abe80af72a27d8c04 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/3f97ed1d2eca435abe80af72a27d8c04 2024-11-22T15:22:34,632 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/ca730382bd564cd490a7ab5a116475b5 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/ca730382bd564cd490a7ab5a116475b5 2024-11-22T15:22:34,633 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/9e700e290e864229878ddc7bcd9c5c08 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/9e700e290e864229878ddc7bcd9c5c08 2024-11-22T15:22:34,634 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/11fb54a59af2484e860050b1bec6c033 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/11fb54a59af2484e860050b1bec6c033 2024-11-22T15:22:34,635 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/e0a2286047864900a64e781a448c762c to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/e0a2286047864900a64e781a448c762c 2024-11-22T15:22:34,636 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/15a65b1a524e4851819cd00d7ed59faa to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/15a65b1a524e4851819cd00d7ed59faa 2024-11-22T15:22:34,636 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/dbed00b94a0243ffbfc124b4e3feeb2e to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/dbed00b94a0243ffbfc124b4e3feeb2e 2024-11-22T15:22:34,637 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/6f7381fe331c4d0c82b31a34fdea6b02 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/6f7381fe331c4d0c82b31a34fdea6b02 2024-11-22T15:22:34,638 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/a315f0b7cdee4c0faa5d66ae3fc3acc0 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/a315f0b7cdee4c0faa5d66ae3fc3acc0 2024-11-22T15:22:34,639 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/1e265f9f52e943ac82098e1a791c9eb9 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/1e265f9f52e943ac82098e1a791c9eb9 2024-11-22T15:22:34,640 DEBUG [StoreCloser-TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/0f016afd41bd4f00a68b0ca6ce0b7eda to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/0f016afd41bd4f00a68b0ca6ce0b7eda 2024-11-22T15:22:34,645 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/recovered.edits/662.seqid, newMaxSeqId=662, maxSeqId=1 2024-11-22T15:22:34,648 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f. 2024-11-22T15:22:34,648 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1635): Region close journal for ed6f777bba2efed5f759348895e3133f: 2024-11-22T15:22:34,649 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] handler.UnassignRegionHandler(170): Closed ed6f777bba2efed5f759348895e3133f 2024-11-22T15:22:34,650 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=ed6f777bba2efed5f759348895e3133f, regionState=CLOSED 2024-11-22T15:22:34,652 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=37, resume processing ppid=36 2024-11-22T15:22:34,652 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=36, state=SUCCESS; CloseRegionProcedure ed6f777bba2efed5f759348895e3133f, server=77927f992d0b,36033,1732288915809 in 1.6300 sec 2024-11-22T15:22:34,653 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=36, resume processing ppid=35 2024-11-22T15:22:34,653 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, ppid=35, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=ed6f777bba2efed5f759348895e3133f, UNASSIGN in 1.6350 sec 2024-11-22T15:22:34,654 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=35, resume processing ppid=34 2024-11-22T15:22:34,655 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, ppid=34, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.6400 sec 2024-11-22T15:22:34,655 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732288954655"}]},"ts":"1732288954655"} 2024-11-22T15:22:34,657 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-22T15:22:34,670 INFO 
[PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-22T15:22:34,672 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.6740 sec 2024-11-22T15:22:35,108 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-22T15:22:35,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-22T15:22:35,109 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 34 completed 2024-11-22T15:22:35,117 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-22T15:22:35,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=38, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T15:22:35,122 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=38, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T15:22:35,123 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=38, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T15:22:35,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-11-22T15:22:35,127 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f 2024-11-22T15:22:35,130 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A, FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B, FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C, FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/recovered.edits] 2024-11-22T15:22:35,133 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/04a1a4ac25f04cc6a4304c18e5e411fc to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/04a1a4ac25f04cc6a4304c18e5e411fc 2024-11-22T15:22:35,134 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/7d1e3ed5f8744a1e98a34591aeef4372 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/A/7d1e3ed5f8744a1e98a34591aeef4372 2024-11-22T15:22:35,137 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/2eebc6ece97741cb8dae35a87087a2cc to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/2eebc6ece97741cb8dae35a87087a2cc 2024-11-22T15:22:35,138 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/6bf1786648364787bb910bf93040484d to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/B/6bf1786648364787bb910bf93040484d 2024-11-22T15:22:35,140 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/3c1b381a753a484d9a69e28a6841e957 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/3c1b381a753a484d9a69e28a6841e957 2024-11-22T15:22:35,141 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/cca7588b8365465b8ad459242d63db8c to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/C/cca7588b8365465b8ad459242d63db8c 2024-11-22T15:22:35,143 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/recovered.edits/662.seqid to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f/recovered.edits/662.seqid 2024-11-22T15:22:35,144 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed6f777bba2efed5f759348895e3133f 2024-11-22T15:22:35,144 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-22T15:22:35,149 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=38, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T15:22:35,152 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 
0ms 2024-11-22T15:22:35,155 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-22T15:22:35,183 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-22T15:22:35,184 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=38, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T15:22:35,184 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-22T15:22:35,184 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732288955184"}]},"ts":"9223372036854775807"} 2024-11-22T15:22:35,187 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-22T15:22:35,187 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => ed6f777bba2efed5f759348895e3133f, NAME => 'TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f.', STARTKEY => '', ENDKEY => ''}] 2024-11-22T15:22:35,188 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-22T15:22:35,188 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732288955188"}]},"ts":"9223372036854775807"} 2024-11-22T15:22:35,191 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-22T15:22:35,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-11-22T15:22:35,233 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=38, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T15:22:35,234 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 115 msec 2024-11-22T15:22:35,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-11-22T15:22:35,426 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 38 completed 2024-11-22T15:22:35,440 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=238 (was 219) Potentially hanging thread: RS:0;77927f992d0b:36033-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/77927f992d0b:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x6a38d96a-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x6a38d96a-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-565273453_22 at /127.0.0.1:44030 [Waiting for operation #13] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x6a38d96a-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: hconnection-0x6a38d96a-shared-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 444) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=653 (was 415) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=4439 (was 4916) 2024-11-22T15:22:35,449 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=238, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=653, ProcessCount=11, AvailableMemoryMB=4439 2024-11-22T15:22:35,451 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-11-22T15:22:35,451 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T15:22:35,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=39, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-22T15:22:35,453 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T15:22:35,453 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:35,453 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 39 2024-11-22T15:22:35,454 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T15:22:35,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-11-22T15:22:35,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741985_1161 (size=963) 2024-11-22T15:22:35,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-11-22T15:22:35,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-11-22T15:22:35,867 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690 2024-11-22T15:22:35,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741986_1162 (size=53) 2024-11-22T15:22:36,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-11-22T15:22:36,275 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T15:22:36,276 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing c30eef12e99d24ea8c4e5ace242daf20, disabling compactions & flushes 2024-11-22T15:22:36,276 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:36,276 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:36,276 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. after waiting 0 ms 2024-11-22T15:22:36,276 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:36,276 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 
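
The create request logged by HMaster$4 above amounts to a table descriptor with the table-level metadata key 'hbase.hregion.compacting.memstore.type' set to 'ADAPTIVE' and three single-version column families A, B and C (the remaining attributes in the printout are family defaults). A hedged equivalent using the HBase 2.x Admin API; the connection setup is illustrative, not the test's own code.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestAcidGuaranteesSketch {
      public static void create() throws Exception {
        TableDescriptorBuilder builder = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            // TABLE_ATTRIBUTES => METADATA => 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'
            .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
        for (String family : new String[] {"A", "B", "C"}) {
          builder.setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1) // VERSIONS => '1'; other attributes keep their defaults
              .build());
        }
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // The master stores this as a CreateTableProcedure (pid=39 in the log) and the
          // client keeps polling "is procedure done" until the region is assigned.
          admin.createTable(builder.build());
        }
      }
    }
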
2024-11-22T15:22:36,276 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for c30eef12e99d24ea8c4e5ace242daf20: 2024-11-22T15:22:36,277 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T15:22:36,277 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732288956277"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732288956277"}]},"ts":"1732288956277"} 2024-11-22T15:22:36,278 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-22T15:22:36,279 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T15:22:36,280 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732288956280"}]},"ts":"1732288956280"} 2024-11-22T15:22:36,281 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-22T15:22:36,299 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c30eef12e99d24ea8c4e5ace242daf20, ASSIGN}] 2024-11-22T15:22:36,300 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c30eef12e99d24ea8c4e5ace242daf20, ASSIGN 2024-11-22T15:22:36,301 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=c30eef12e99d24ea8c4e5ace242daf20, ASSIGN; state=OFFLINE, location=77927f992d0b,36033,1732288915809; forceNewPlan=false, retain=false 2024-11-22T15:22:36,452 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=c30eef12e99d24ea8c4e5ace242daf20, regionState=OPENING, regionLocation=77927f992d0b,36033,1732288915809 2024-11-22T15:22:36,455 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=41, ppid=40, state=RUNNABLE; OpenRegionProcedure c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809}] 2024-11-22T15:22:36,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-11-22T15:22:36,609 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:36,615 INFO [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 
2024-11-22T15:22:36,615 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(7285): Opening region: {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} 2024-11-22T15:22:36,616 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:36,616 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T15:22:36,617 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(7327): checking encryption for c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:36,617 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(7330): checking classloading for c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:36,620 INFO [StoreOpener-c30eef12e99d24ea8c4e5ace242daf20-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:36,623 INFO [StoreOpener-c30eef12e99d24ea8c4e5ace242daf20-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T15:22:36,623 INFO [StoreOpener-c30eef12e99d24ea8c4e5ace242daf20-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c30eef12e99d24ea8c4e5ace242daf20 columnFamilyName A 2024-11-22T15:22:36,623 DEBUG [StoreOpener-c30eef12e99d24ea8c4e5ace242daf20-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:36,624 INFO [StoreOpener-c30eef12e99d24ea8c4e5ace242daf20-1 {}] regionserver.HStore(327): Store=c30eef12e99d24ea8c4e5ace242daf20/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T15:22:36,625 INFO [StoreOpener-c30eef12e99d24ea8c4e5ace242daf20-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:36,626 INFO [StoreOpener-c30eef12e99d24ea8c4e5ace242daf20-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T15:22:36,627 INFO [StoreOpener-c30eef12e99d24ea8c4e5ace242daf20-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c30eef12e99d24ea8c4e5ace242daf20 columnFamilyName B 2024-11-22T15:22:36,627 DEBUG [StoreOpener-c30eef12e99d24ea8c4e5ace242daf20-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:36,628 INFO [StoreOpener-c30eef12e99d24ea8c4e5ace242daf20-1 {}] regionserver.HStore(327): Store=c30eef12e99d24ea8c4e5ace242daf20/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T15:22:36,628 INFO [StoreOpener-c30eef12e99d24ea8c4e5ace242daf20-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:36,629 INFO [StoreOpener-c30eef12e99d24ea8c4e5ace242daf20-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T15:22:36,630 INFO [StoreOpener-c30eef12e99d24ea8c4e5ace242daf20-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c30eef12e99d24ea8c4e5ace242daf20 columnFamilyName C 2024-11-22T15:22:36,630 DEBUG [StoreOpener-c30eef12e99d24ea8c4e5ace242daf20-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:36,630 INFO [StoreOpener-c30eef12e99d24ea8c4e5ace242daf20-1 {}] regionserver.HStore(327): Store=c30eef12e99d24ea8c4e5ace242daf20/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T15:22:36,630 INFO [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:36,632 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:36,632 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:36,634 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T15:22:36,636 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(1085): writing seq id for c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:36,638 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T15:22:36,639 INFO [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(1102): Opened c30eef12e99d24ea8c4e5ace242daf20; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60385255, jitterRate=-0.10018958151340485}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T15:22:36,640 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(1001): Region open journal for c30eef12e99d24ea8c4e5ace242daf20: 2024-11-22T15:22:36,641 INFO [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20., pid=41, masterSystemTime=1732288956609 2024-11-22T15:22:36,643 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:36,643 INFO [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 
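
Each store opener prints its effective compaction tuning in the CompactionConfiguration(181) entries above: at least 3 and at most 10 files per minor compaction, a selection ratio of 1.2, and a 7-day (604800000 ms) major compaction period. These values correspond, under the usual HBase key names, to the hbase.hstore.compaction.* and hbase.hregion.majorcompaction settings; the mapping below is stated as an assumption, with the values copied from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);              // minFilesToCompact:3
        conf.setInt("hbase.hstore.compaction.max", 10);             // maxFilesToCompact:10
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);       // "ratio 1.200000"
        conf.setLong("hbase.hregion.majorcompaction", 604800000L);  // "major period", 7 days in ms
        return conf;
      }
    }
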
2024-11-22T15:22:36,644 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=c30eef12e99d24ea8c4e5ace242daf20, regionState=OPEN, openSeqNum=2, regionLocation=77927f992d0b,36033,1732288915809 2024-11-22T15:22:36,648 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=41, resume processing ppid=40 2024-11-22T15:22:36,648 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, ppid=40, state=SUCCESS; OpenRegionProcedure c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 in 190 msec 2024-11-22T15:22:36,650 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=40, resume processing ppid=39 2024-11-22T15:22:36,650 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, ppid=39, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=c30eef12e99d24ea8c4e5ace242daf20, ASSIGN in 349 msec 2024-11-22T15:22:36,651 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T15:22:36,651 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732288956651"}]},"ts":"1732288956651"} 2024-11-22T15:22:36,652 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-22T15:22:36,662 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T15:22:36,664 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2110 sec 2024-11-22T15:22:37,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-11-22T15:22:37,564 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 39 completed 2024-11-22T15:22:37,566 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x04ddf4c3 to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@ff872d8 2024-11-22T15:22:37,583 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4506927, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:22:37,585 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:22:37,587 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55530, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:22:37,588 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-22T15:22:37,589 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46970, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-22T15:22:37,594 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-22T15:22:37,595 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T15:22:37,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=42, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-22T15:22:37,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741987_1163 (size=999) 2024-11-22T15:22:38,012 DEBUG [PEWorker-1 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-11-22T15:22:38,012 INFO [PEWorker-1 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-11-22T15:22:38,016 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=43, ppid=42, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-22T15:22:38,024 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=43, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c30eef12e99d24ea8c4e5ace242daf20, REOPEN/MOVE}] 2024-11-22T15:22:38,025 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=44, ppid=43, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c30eef12e99d24ea8c4e5ace242daf20, REOPEN/MOVE 2024-11-22T15:22:38,025 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=44 updating hbase:meta row=c30eef12e99d24ea8c4e5ace242daf20, regionState=CLOSING, regionLocation=77927f992d0b,36033,1732288915809 2024-11-22T15:22:38,026 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-22T15:22:38,026 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=45, ppid=44, state=RUNNABLE; CloseRegionProcedure c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809}] 2024-11-22T15:22:38,178 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:38,179 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] handler.UnassignRegionHandler(124): Close c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:38,179 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-22T15:22:38,179 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1681): Closing c30eef12e99d24ea8c4e5ace242daf20, disabling compactions & flushes 2024-11-22T15:22:38,179 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:38,179 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:38,179 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. after waiting 0 ms 2024-11-22T15:22:38,179 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 
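
The HMaster$14 modify entry above turns column family A into a MOB family (IS_MOB => 'true') with MOB_THRESHOLD => '4', so any value in A longer than 4 bytes is written out as a MOB file; that is what makes this the testMobMixedAtomicity variant. The ModifyTableProcedure (pid=42) then reopens the region, which is what the surrounding CLOSE/OPEN entries show. A hedged sketch of the same descriptor change via the Admin API; the connection handling is illustrative.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class EnableMobOnFamilyASketch {
      public static void enableMob() throws Exception {
        TableName tn = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptor current = admin.getDescriptor(tn);
          ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder
              .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
              .setMobEnabled(true)   // IS_MOB => 'true'
              .setMobThreshold(4L)   // MOB_THRESHOLD => '4' bytes
              .build();
          // Executed as ModifyTableProcedure + ReopenTableRegionsProcedure, as in pids 42-46 above.
          admin.modifyTable(TableDescriptorBuilder.newBuilder(current)
              .modifyColumnFamily(mobA)
              .build());
        }
      }
    }
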
2024-11-22T15:22:38,185 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-22T15:22:38,187 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:38,187 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1635): Region close journal for c30eef12e99d24ea8c4e5ace242daf20: 2024-11-22T15:22:38,187 WARN [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegionServer(3786): Not adding moved region record: c30eef12e99d24ea8c4e5ace242daf20 to self. 2024-11-22T15:22:38,189 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] handler.UnassignRegionHandler(170): Closed c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:38,190 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=44 updating hbase:meta row=c30eef12e99d24ea8c4e5ace242daf20, regionState=CLOSED 2024-11-22T15:22:38,193 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=45, resume processing ppid=44 2024-11-22T15:22:38,193 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, ppid=44, state=SUCCESS; CloseRegionProcedure c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 in 165 msec 2024-11-22T15:22:38,193 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=44, ppid=43, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=c30eef12e99d24ea8c4e5ace242daf20, REOPEN/MOVE; state=CLOSED, location=77927f992d0b,36033,1732288915809; forceNewPlan=false, retain=true 2024-11-22T15:22:38,344 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=44 updating hbase:meta row=c30eef12e99d24ea8c4e5ace242daf20, regionState=OPENING, regionLocation=77927f992d0b,36033,1732288915809 2024-11-22T15:22:38,346 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=46, ppid=44, state=RUNNABLE; OpenRegionProcedure c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809}] 2024-11-22T15:22:38,499 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:38,505 INFO [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 
2024-11-22T15:22:38,505 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(7285): Opening region: {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} 2024-11-22T15:22:38,506 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:38,506 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T15:22:38,506 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(7327): checking encryption for c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:38,507 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(7330): checking classloading for c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:38,510 INFO [StoreOpener-c30eef12e99d24ea8c4e5ace242daf20-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:38,511 INFO [StoreOpener-c30eef12e99d24ea8c4e5ace242daf20-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T15:22:38,517 INFO [StoreOpener-c30eef12e99d24ea8c4e5ace242daf20-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c30eef12e99d24ea8c4e5ace242daf20 columnFamilyName A 2024-11-22T15:22:38,519 DEBUG [StoreOpener-c30eef12e99d24ea8c4e5ace242daf20-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:38,519 INFO [StoreOpener-c30eef12e99d24ea8c4e5ace242daf20-1 {}] regionserver.HStore(327): Store=c30eef12e99d24ea8c4e5ace242daf20/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T15:22:38,519 INFO [StoreOpener-c30eef12e99d24ea8c4e5ace242daf20-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:38,520 INFO [StoreOpener-c30eef12e99d24ea8c4e5ace242daf20-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T15:22:38,520 INFO [StoreOpener-c30eef12e99d24ea8c4e5ace242daf20-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c30eef12e99d24ea8c4e5ace242daf20 columnFamilyName B 2024-11-22T15:22:38,520 DEBUG [StoreOpener-c30eef12e99d24ea8c4e5ace242daf20-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:38,521 INFO [StoreOpener-c30eef12e99d24ea8c4e5ace242daf20-1 {}] regionserver.HStore(327): Store=c30eef12e99d24ea8c4e5ace242daf20/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T15:22:38,521 INFO [StoreOpener-c30eef12e99d24ea8c4e5ace242daf20-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:38,521 INFO [StoreOpener-c30eef12e99d24ea8c4e5ace242daf20-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T15:22:38,522 INFO [StoreOpener-c30eef12e99d24ea8c4e5ace242daf20-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c30eef12e99d24ea8c4e5ace242daf20 columnFamilyName C 2024-11-22T15:22:38,522 DEBUG [StoreOpener-c30eef12e99d24ea8c4e5ace242daf20-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:38,522 INFO [StoreOpener-c30eef12e99d24ea8c4e5ace242daf20-1 {}] regionserver.HStore(327): Store=c30eef12e99d24ea8c4e5ace242daf20/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T15:22:38,522 INFO [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:38,523 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:38,524 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:38,525 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T15:22:38,527 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(1085): writing seq id for c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:38,528 INFO [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(1102): Opened c30eef12e99d24ea8c4e5ace242daf20; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67336997, jitterRate=0.0033994466066360474}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T15:22:38,529 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(1001): Region open journal for c30eef12e99d24ea8c4e5ace242daf20: 2024-11-22T15:22:38,530 INFO [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20., pid=46, masterSystemTime=1732288958498 2024-11-22T15:22:38,531 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:38,531 INFO [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 
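
After the reopen, the test opens a pool of client connections (the ReadOnlyZKClient / AbstractRpcClient entries that follow) and asks the master to flush the table; the concurrent writers then push the region past its 512 KB blocking memstore limit, which matches the 128 KB flush size times the default block multiplier of 4, so several puts further down are answered with RegionTooBusyException. A hedged sketch of the client-side calls involved; names are illustrative, and since RegionTooBusyException is retryable, the standard client backs off and retries it (up to hbase.client.retries.number attempts) rather than failing immediately.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FlushAndWriteSketch {
      public static void run() throws Exception {
        TableName tn = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin();
             Table table = conn.getTable(tn)) {
          // The master logs this as HMaster$22 "flush TestAcidGuarantees" / FlushTableProcedure pid=47.
          admin.flush(tn);
          // Writes like this one are what trips "Over memstore limit=512.0 K" on the server;
          // the client retries the RegionTooBusyException responses instead of surfacing them at once.
          table.put(new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v")));
        }
      }
    }
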
2024-11-22T15:22:38,532 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=44 updating hbase:meta row=c30eef12e99d24ea8c4e5ace242daf20, regionState=OPEN, openSeqNum=5, regionLocation=77927f992d0b,36033,1732288915809 2024-11-22T15:22:38,534 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=46, resume processing ppid=44 2024-11-22T15:22:38,534 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, ppid=44, state=SUCCESS; OpenRegionProcedure c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 in 187 msec 2024-11-22T15:22:38,535 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=43 2024-11-22T15:22:38,535 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=43, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=c30eef12e99d24ea8c4e5ace242daf20, REOPEN/MOVE in 510 msec 2024-11-22T15:22:38,537 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=43, resume processing ppid=42 2024-11-22T15:22:38,537 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, ppid=42, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 521 msec 2024-11-22T15:22:38,540 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 942 msec 2024-11-22T15:22:38,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=42 2024-11-22T15:22:38,546 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x29458edd to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@46c2c778 2024-11-22T15:22:38,603 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79982672, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:22:38,604 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4c7d6279 to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@433e2b26 2024-11-22T15:22:38,616 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b4bd1ba, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:22:38,618 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x328f994d to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@e3a4420 2024-11-22T15:22:38,629 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ebda6ad, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:22:38,631 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0a9306be to 
127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@24f64590 2024-11-22T15:22:38,650 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@505d5ccd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:22:38,651 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x769942d9 to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7c5c4716 2024-11-22T15:22:38,666 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@465dc764, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:22:38,668 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2885d2d9 to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@cb464a 2024-11-22T15:22:38,679 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68f0be85, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:22:38,680 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x22e911df to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@78cafade 2024-11-22T15:22:38,691 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@152377d4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:22:38,693 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3b727d6e to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@14c16cd4 2024-11-22T15:22:38,704 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a52344f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:22:38,706 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1c7940d9 to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@341384e 2024-11-22T15:22:38,717 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8ba8425, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:22:38,720 INFO 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:22:38,720 DEBUG [hconnection-0x1e22067d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:22:38,720 DEBUG [hconnection-0x3627e90e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:22:38,720 DEBUG [hconnection-0x31153c1b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:22:38,720 DEBUG [hconnection-0x10cb5edf-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:22:38,721 DEBUG [hconnection-0x57c53fe4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:22:38,721 DEBUG [hconnection-0x38f3bfad-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:22:38,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees 2024-11-22T15:22:38,721 DEBUG [hconnection-0x3afb7273-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:22:38,722 DEBUG [hconnection-0x623f48b6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:22:38,722 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58666, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:22:38,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-22T15:22:38,723 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58668, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:22:38,723 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58688, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:22:38,723 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58700, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:22:38,723 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58718, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:22:38,723 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58678, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:22:38,723 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58702, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:22:38,726 DEBUG [hconnection-0x78b8fca0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=ClientService, sasl=false 2024-11-22T15:22:38,728 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:22:38,728 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58724, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:22:38,729 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:22:38,729 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:22:38,732 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58734, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:22:38,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:38,736 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c30eef12e99d24ea8c4e5ace242daf20 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-22T15:22:38,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=A 2024-11-22T15:22:38,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:38,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=B 2024-11-22T15:22:38,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:38,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=C 2024-11-22T15:22:38,737 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:38,784 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122436118fb2ebd45749bb20d52a2bd3cbd_c30eef12e99d24ea8c4e5ace242daf20 is 50, key is test_row_0/A:col10/1732288958735/Put/seqid=0 2024-11-22T15:22:38,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741988_1164 (size=12154) 2024-11-22T15:22:38,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-22T15:22:38,830 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:38,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58688 deadline: 1732289018827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:38,831 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:38,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58678 deadline: 1732289018827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:38,832 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:38,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58702 deadline: 1732289018828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:38,832 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:38,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289018829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:38,833 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:38,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289018830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:38,882 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:38,885 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-22T15:22:38,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:38,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. as already flushing 2024-11-22T15:22:38,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:38,886 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
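The records above show the client-requested flush of TestAcidGuarantees running as FlushTableProcedure pid=47 with a FlushRegionProcedure child pid=48; the region server rejects each attempt with "Unable to complete flush" because the region is already flushing, so the master keeps re-dispatching the callable while the caller polls "Checking to see if procedure is done pid=47". A minimal sketch of how such a flush is requested through the public HBase Admin API is included here for orientation; the table name is taken from the log, while the class name and configuration source are illustrative assumptions, not the test's actual code.

// Illustrative sketch only (not from the test source): issuing the table flush that
// the master records above as FlushTableProcedure pid=47. Uses the public HBase
// client API; only the table name is taken from this log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // The master turns this request into a flush procedure; the repeated
      // "Checking to see if procedure is done pid=47" lines above are the client
      // waiting for that procedure to finish.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}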
2024-11-22T15:22:38,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:38,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:38,934 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:38,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58678 deadline: 1732289018932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:38,937 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:38,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58688 deadline: 1732289018933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:38,938 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:38,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58702 deadline: 1732289018933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:38,938 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:38,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289018933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:38,939 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:38,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289018934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:39,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-22T15:22:39,038 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:39,039 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-22T15:22:39,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:39,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. as already flushing 2024-11-22T15:22:39,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:39,040 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
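Every RegionTooBusyException above comes from HRegion.checkResources refusing new writes once the region's memstore passes its blocking size, reported here as "Over memstore limit=512.0 K". In stock HBase that ceiling is the region flush size multiplied by the memstore block multiplier, and a 512 KB ceiling suggests the test runs with a deliberately small flush size. The sketch below only illustrates the two standard settings involved; the property names are real HBase keys, but the values are assumptions chosen so their product is 512 KB, not values read from this log.

// Illustrative only: the two standard settings that determine the per-region memstore
// blocking threshold (flush size x block multiplier). The values below are assumptions
// picked so the product matches the 512 KB limit logged above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // flush at 128 KB (default is 128 MB)
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // block writes at 4 x flush size
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Writes blocked above roughly " + blockingLimit + " bytes per region"); // 524288 = 512 KB
  }
}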
2024-11-22T15:22:39,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:39,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:39,137 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:39,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58678 deadline: 1732289019136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:39,141 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:39,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58702 deadline: 1732289019140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:39,142 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:39,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289019140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:39,142 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:39,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289019141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:39,144 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:39,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58688 deadline: 1732289019142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:39,192 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:39,193 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-22T15:22:39,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:39,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. as already flushing 2024-11-22T15:22:39,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:39,193 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
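The writer connections above (58668, 58678, 58688, 58702, 58734) keep resubmitting small Mutate calls and receiving RegionTooBusyException until the flush drains the memstore; the standard HBase client treats this exception as retriable and backs off on its own, which is why the same connections reappear with increasing callIds. For an application that manages its own retries, a hedged sketch of that backoff loop is shown below. The table, row, family and qualifier mirror keys visible in this log (test_row_0, A:col10); the retry count, backoff values, and the assumption that the exception surfaces directly to the caller rather than wrapped in the client's retries-exhausted error are illustrative only.

// Minimal sketch, not the test's code: retrying a put when the region reports
// RegionTooBusyException ("Over memstore limit"), as the writers above keep doing.
// The stock client normally retries this internally; an explicit loop like this only
// matters when client retries are turned down or handled by the application itself.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;                       // arbitrary starting backoff
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          break;                                  // write accepted
        } catch (RegionTooBusyException busy) {
          Thread.sleep(backoffMs);                // memstore over its blocking limit; wait for the flush
          backoffMs *= 2;                         // exponential backoff before the next attempt
        }
      }
    }
  }
}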
2024-11-22T15:22:39,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:39,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:39,213 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:39,218 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122436118fb2ebd45749bb20d52a2bd3cbd_c30eef12e99d24ea8c4e5ace242daf20 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122436118fb2ebd45749bb20d52a2bd3cbd_c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:39,220 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/bbe688e57dfb4609a0a84b4a198dca57, store: [table=TestAcidGuarantees family=A region=c30eef12e99d24ea8c4e5ace242daf20] 2024-11-22T15:22:39,229 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/bbe688e57dfb4609a0a84b4a198dca57 is 175, key is test_row_0/A:col10/1732288958735/Put/seqid=0 2024-11-22T15:22:39,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741989_1165 (size=30955) 2024-11-22T15:22:39,246 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=15, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/bbe688e57dfb4609a0a84b4a198dca57 2024-11-22T15:22:39,280 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/e16db852644540149b4cc2120ca0d78f is 50, key is test_row_0/B:col10/1732288958735/Put/seqid=0 2024-11-22T15:22:39,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741990_1166 (size=12001) 2024-11-22T15:22:39,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-22T15:22:39,345 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:39,345 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-22T15:22:39,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation 
on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:39,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. as already flushing 2024-11-22T15:22:39,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:39,346 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:39,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
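The mobdir paths and the HMobStore/DefaultMobStoreFlusher lines above indicate that column family A of TestAcidGuarantees is MOB-enabled: flushed values are first written as MOB files under mobdir/.tmp, renamed into the MOB data directory, and only then is the regular store file for the family committed. A short sketch of how a MOB-enabled family can be declared with the public descriptor API follows; the family and table names match the log, while the threshold value and the helper class are assumptions rather than the test's setup code.

// Illustrative sketch (not the test's setup code): declaring a MOB-enabled column
// family, which is what produces the mobdir/... flush and rename steps logged above.
// The family name "A" matches the log; the 100-byte threshold is an assumed value.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilyExample {
  // Builds a descriptor for TestAcidGuarantees with a MOB-enabled family "A".
  public static TableDescriptor buildDescriptor() {
    ColumnFamilyDescriptor familyA = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("A"))
        .setMobEnabled(true)       // values above the threshold are stored as MOB files under mobdir
        .setMobThreshold(100L)     // assumed threshold in bytes, not taken from this log
        .build();
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setColumnFamily(familyA)
        .build();
  }
}

Passing the returned descriptor to Admin.createTable would create a table whose family A flushes through the MOB path seen in the records above.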
2024-11-22T15:22:39,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:39,444 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:39,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58678 deadline: 1732289019442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:39,445 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:39,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58702 deadline: 1732289019445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:39,446 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:39,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289019445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:39,446 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:39,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289019445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:39,447 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:39,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58688 deadline: 1732289019447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:39,498 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:39,499 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-22T15:22:39,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:39,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. as already flushing 2024-11-22T15:22:39,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:39,499 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:39,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:39,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:39,654 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:39,655 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-22T15:22:39,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:39,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. as already flushing 2024-11-22T15:22:39,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:39,655 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:39,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:39,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:39,712 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/e16db852644540149b4cc2120ca0d78f 2024-11-22T15:22:39,746 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/e5144371e8ef45b88889289ed515fdb7 is 50, key is test_row_0/C:col10/1732288958735/Put/seqid=0 2024-11-22T15:22:39,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741991_1167 (size=12001) 2024-11-22T15:22:39,761 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/e5144371e8ef45b88889289ed515fdb7 2024-11-22T15:22:39,769 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/bbe688e57dfb4609a0a84b4a198dca57 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/bbe688e57dfb4609a0a84b4a198dca57 2024-11-22T15:22:39,775 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/bbe688e57dfb4609a0a84b4a198dca57, entries=150, sequenceid=15, filesize=30.2 K 2024-11-22T15:22:39,776 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/e16db852644540149b4cc2120ca0d78f as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/e16db852644540149b4cc2120ca0d78f 2024-11-22T15:22:39,797 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/e16db852644540149b4cc2120ca0d78f, entries=150, sequenceid=15, filesize=11.7 K 2024-11-22T15:22:39,799 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/e5144371e8ef45b88889289ed515fdb7 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/e5144371e8ef45b88889289ed515fdb7 2024-11-22T15:22:39,807 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:39,808 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-22T15:22:39,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:39,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. as already flushing 2024-11-22T15:22:39,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:39,809 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:39,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:39,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:39,812 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/e5144371e8ef45b88889289ed515fdb7, entries=150, sequenceid=15, filesize=11.7 K 2024-11-22T15:22:39,814 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for c30eef12e99d24ea8c4e5ace242daf20 in 1078ms, sequenceid=15, compaction requested=false 2024-11-22T15:22:39,814 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c30eef12e99d24ea8c4e5ace242daf20: 2024-11-22T15:22:39,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-22T15:22:39,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:39,949 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c30eef12e99d24ea8c4e5ace242daf20 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-22T15:22:39,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=A 2024-11-22T15:22:39,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:39,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=B 2024-11-22T15:22:39,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:39,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=C 2024-11-22T15:22:39,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:39,961 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:39,962 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-22T15:22:39,963 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:39,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289019958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:39,963 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:39,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58678 deadline: 1732289019959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:39,964 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:39,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58702 deadline: 1732289019959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:39,965 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122abb3099f6d45438a8806a7304892f914_c30eef12e99d24ea8c4e5ace242daf20 is 50, key is test_row_0/A:col10/1732288958829/Put/seqid=0 2024-11-22T15:22:39,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 
2024-11-22T15:22:39,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. as already flushing 2024-11-22T15:22:39,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:39,965 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:39,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:22:39,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:39,967 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:39,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289019961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:39,967 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:39,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58688 deadline: 1732289019964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:40,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741992_1168 (size=14594) 2024-11-22T15:22:40,005 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:40,009 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122abb3099f6d45438a8806a7304892f914_c30eef12e99d24ea8c4e5ace242daf20 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122abb3099f6d45438a8806a7304892f914_c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:40,010 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/5522076e51604a3e8659c5868d24d567, store: [table=TestAcidGuarantees family=A region=c30eef12e99d24ea8c4e5ace242daf20] 2024-11-22T15:22:40,011 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/5522076e51604a3e8659c5868d24d567 is 175, key is test_row_0/A:col10/1732288958829/Put/seqid=0 2024-11-22T15:22:40,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741993_1169 (size=39549) 2024-11-22T15:22:40,067 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:40,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58702 deadline: 1732289020065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:40,068 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:40,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58678 deadline: 1732289020066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:40,069 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:40,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289020067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:40,069 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:40,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58688 deadline: 1732289020069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:40,071 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:40,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289020071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:40,118 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:40,119 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-22T15:22:40,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 
2024-11-22T15:22:40,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. as already flushing 2024-11-22T15:22:40,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:40,119 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:40,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:22:40,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:40,196 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-22T15:22:40,272 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:40,272 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:40,272 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-22T15:22:40,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:40,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. as already flushing 2024-11-22T15:22:40,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:40,273 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:22:40,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:40,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58702 deadline: 1732289020269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:40,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:40,277 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:40,277 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:40,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289020271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:40,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58678 deadline: 1732289020271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:40,278 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:40,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289020273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:40,279 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:40,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58688 deadline: 1732289020271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:40,421 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/5522076e51604a3e8659c5868d24d567 2024-11-22T15:22:40,425 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:40,425 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-22T15:22:40,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:40,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. as already flushing 2024-11-22T15:22:40,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 
2024-11-22T15:22:40,426 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:40,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:40,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:40,449 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/fdf803244299492db4cc0fed08af070e is 50, key is test_row_0/B:col10/1732288958829/Put/seqid=0 2024-11-22T15:22:40,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741994_1170 (size=12001) 2024-11-22T15:22:40,495 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/fdf803244299492db4cc0fed08af070e 2024-11-22T15:22:40,517 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/66cbc3c1e2eb433991bacc269d007ce5 is 50, key is test_row_0/C:col10/1732288958829/Put/seqid=0 2024-11-22T15:22:40,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741995_1171 (size=12001) 2024-11-22T15:22:40,573 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/66cbc3c1e2eb433991bacc269d007ce5 2024-11-22T15:22:40,578 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:40,580 
DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-22T15:22:40,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:40,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. as already flushing 2024-11-22T15:22:40,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:40,581 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:40,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:22:40,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:40,585 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:40,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58702 deadline: 1732289020574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:40,586 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:40,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289020580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:40,586 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:40,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58678 deadline: 1732289020580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:40,589 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/5522076e51604a3e8659c5868d24d567 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/5522076e51604a3e8659c5868d24d567 2024-11-22T15:22:40,591 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:40,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58688 deadline: 1732289020583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:40,595 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/5522076e51604a3e8659c5868d24d567, entries=200, sequenceid=41, filesize=38.6 K 2024-11-22T15:22:40,596 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:40,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289020588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:40,596 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/fdf803244299492db4cc0fed08af070e as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/fdf803244299492db4cc0fed08af070e 2024-11-22T15:22:40,602 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/fdf803244299492db4cc0fed08af070e, entries=150, sequenceid=41, filesize=11.7 K 2024-11-22T15:22:40,604 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/66cbc3c1e2eb433991bacc269d007ce5 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/66cbc3c1e2eb433991bacc269d007ce5 2024-11-22T15:22:40,610 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/66cbc3c1e2eb433991bacc269d007ce5, entries=150, sequenceid=41, filesize=11.7 K 2024-11-22T15:22:40,611 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for c30eef12e99d24ea8c4e5ace242daf20 in 662ms, sequenceid=41, compaction requested=false 2024-11-22T15:22:40,611 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c30eef12e99d24ea8c4e5ace242daf20: 2024-11-22T15:22:40,735 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:40,735 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-22T15:22:40,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:40,736 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2837): Flushing c30eef12e99d24ea8c4e5ace242daf20 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-22T15:22:40,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=A 2024-11-22T15:22:40,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:40,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=B 2024-11-22T15:22:40,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:40,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=C 2024-11-22T15:22:40,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:40,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122a8397ecbfcc14154aa911a34fdddf7ca_c30eef12e99d24ea8c4e5ace242daf20 is 50, key is test_row_0/A:col10/1732288959961/Put/seqid=0 2024-11-22T15:22:40,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741996_1172 (size=12154) 2024-11-22T15:22:40,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:40,819 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122a8397ecbfcc14154aa911a34fdddf7ca_c30eef12e99d24ea8c4e5ace242daf20 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122a8397ecbfcc14154aa911a34fdddf7ca_c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:40,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/7eef65639afb4899a3b69efc41b1291e, store: [table=TestAcidGuarantees family=A 
region=c30eef12e99d24ea8c4e5ace242daf20] 2024-11-22T15:22:40,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/7eef65639afb4899a3b69efc41b1291e is 175, key is test_row_0/A:col10/1732288959961/Put/seqid=0 2024-11-22T15:22:40,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-22T15:22:40,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741997_1173 (size=30955) 2024-11-22T15:22:40,859 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=51, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/7eef65639afb4899a3b69efc41b1291e 2024-11-22T15:22:40,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/c6e06b0d962049ca9a127b1b6e2b6925 is 50, key is test_row_0/B:col10/1732288959961/Put/seqid=0 2024-11-22T15:22:40,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741998_1174 (size=12001) 2024-11-22T15:22:41,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:41,102 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. as already flushing 2024-11-22T15:22:41,166 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:41,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58702 deadline: 1732289021154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:41,166 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:41,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289021154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:41,167 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:41,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58678 deadline: 1732289021159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:41,168 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:41,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58688 deadline: 1732289021162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:41,169 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:41,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289021163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:41,258 INFO [master/77927f992d0b:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-22T15:22:41,258 INFO [master/77927f992d0b:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-22T15:22:41,282 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:41,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289021268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:41,282 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:41,282 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:41,282 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:41,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58678 deadline: 1732289021268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:41,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58688 deadline: 1732289021270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:41,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58702 deadline: 1732289021268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:41,283 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:41,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289021271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:41,373 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/c6e06b0d962049ca9a127b1b6e2b6925 2024-11-22T15:22:41,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/1c996d0ab45a4858a8972c5fdf44165d is 50, key is test_row_0/C:col10/1732288959961/Put/seqid=0 2024-11-22T15:22:41,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741999_1175 (size=12001) 2024-11-22T15:22:41,489 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:41,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58678 deadline: 1732289021484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:41,495 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:41,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289021485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:41,496 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:41,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289021485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:41,496 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:41,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58688 deadline: 1732289021486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:41,497 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:41,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58702 deadline: 1732289021486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:41,797 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:41,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58678 deadline: 1732289021793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:41,810 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:41,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289021799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:41,811 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:41,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58688 deadline: 1732289021799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:41,811 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:41,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58702 deadline: 1732289021806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:41,811 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:41,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289021808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:41,818 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/1c996d0ab45a4858a8972c5fdf44165d 2024-11-22T15:22:41,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/7eef65639afb4899a3b69efc41b1291e as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/7eef65639afb4899a3b69efc41b1291e 2024-11-22T15:22:41,831 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/7eef65639afb4899a3b69efc41b1291e, entries=150, sequenceid=51, filesize=30.2 K 2024-11-22T15:22:41,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/c6e06b0d962049ca9a127b1b6e2b6925 as 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/c6e06b0d962049ca9a127b1b6e2b6925 2024-11-22T15:22:41,839 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/c6e06b0d962049ca9a127b1b6e2b6925, entries=150, sequenceid=51, filesize=11.7 K 2024-11-22T15:22:41,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/1c996d0ab45a4858a8972c5fdf44165d as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/1c996d0ab45a4858a8972c5fdf44165d 2024-11-22T15:22:41,851 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/1c996d0ab45a4858a8972c5fdf44165d, entries=150, sequenceid=51, filesize=11.7 K 2024-11-22T15:22:41,852 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=161.02 KB/164880 for c30eef12e99d24ea8c4e5ace242daf20 in 1117ms, sequenceid=51, compaction requested=true 2024-11-22T15:22:41,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2538): Flush status journal for c30eef12e99d24ea8c4e5ace242daf20: 2024-11-22T15:22:41,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 
2024-11-22T15:22:41,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=48 2024-11-22T15:22:41,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=48 2024-11-22T15:22:41,865 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=47 2024-11-22T15:22:41,865 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.1340 sec 2024-11-22T15:22:41,868 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees in 3.1460 sec 2024-11-22T15:22:42,307 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c30eef12e99d24ea8c4e5ace242daf20 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-22T15:22:42,307 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=A 2024-11-22T15:22:42,307 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:42,307 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=B 2024-11-22T15:22:42,307 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:42,307 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=C 2024-11-22T15:22:42,307 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:42,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:42,330 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:42,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58678 deadline: 1732289022325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:42,331 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:42,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58688 deadline: 1732289022325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:42,331 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:42,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289022327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:42,332 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:42,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58702 deadline: 1732289022328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:42,338 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:42,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289022331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:42,361 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122f04756e0f59f40feb8d3b616b763e456_c30eef12e99d24ea8c4e5ace242daf20 is 50, key is test_row_0/A:col10/1732288961158/Put/seqid=0 2024-11-22T15:22:42,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742000_1176 (size=14594) 2024-11-22T15:22:42,441 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:42,443 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:42,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58688 deadline: 1732289022432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:42,444 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:42,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289022433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:42,444 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:42,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58702 deadline: 1732289022433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:42,446 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:42,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289022443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:42,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58678 deadline: 1732289022432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:42,651 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:42,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58688 deadline: 1732289022645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:42,652 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:42,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289022645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:42,652 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:42,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58702 deadline: 1732289022646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:42,653 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:42,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289022649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:42,664 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:42,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58678 deadline: 1732289022660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:42,820 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:42,825 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122f04756e0f59f40feb8d3b616b763e456_c30eef12e99d24ea8c4e5ace242daf20 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122f04756e0f59f40feb8d3b616b763e456_c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:42,827 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/104bd25252c745ef9dfe0f7add7a12d3, store: [table=TestAcidGuarantees family=A region=c30eef12e99d24ea8c4e5ace242daf20] 2024-11-22T15:22:42,828 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/104bd25252c745ef9dfe0f7add7a12d3 is 175, key is test_row_0/A:col10/1732288961158/Put/seqid=0 2024-11-22T15:22:42,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-22T15:22:42,830 INFO [Thread-789 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, 
procId: 47 completed 2024-11-22T15:22:42,833 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:22:42,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees 2024-11-22T15:22:42,835 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:22:42,835 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:22:42,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-22T15:22:42,835 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:22:42,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742001_1177 (size=39549) 2024-11-22T15:22:42,840 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=79, memsize=55.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/104bd25252c745ef9dfe0f7add7a12d3 2024-11-22T15:22:42,871 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/4be0a8483c5941b18c60b8dfbe6c8ef8 is 50, key is test_row_0/B:col10/1732288961158/Put/seqid=0 2024-11-22T15:22:42,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742002_1178 (size=12001) 2024-11-22T15:22:42,906 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/4be0a8483c5941b18c60b8dfbe6c8ef8 2024-11-22T15:22:42,934 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/ac28d40a110d454899960bb778952e7a is 50, key is test_row_0/C:col10/1732288961158/Put/seqid=0 2024-11-22T15:22:42,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-22T15:22:42,957 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:42,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58688 deadline: 1732289022953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:42,958 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:42,958 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:42,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289022954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:42,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58702 deadline: 1732289022953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:42,963 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:42,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289022959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:42,972 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:42,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58678 deadline: 1732289022966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:42,987 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:42,992 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-22T15:22:42,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 
2024-11-22T15:22:42,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. as already flushing 2024-11-22T15:22:42,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:42,992 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:42,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:22:42,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:42,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742003_1179 (size=12001) 2024-11-22T15:22:43,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-22T15:22:43,145 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:43,145 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-22T15:22:43,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 
2024-11-22T15:22:43,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. as already flushing 2024-11-22T15:22:43,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:43,146 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:43,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:22:43,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:43,298 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:43,299 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-22T15:22:43,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:43,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 
as already flushing 2024-11-22T15:22:43,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:43,299 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:43,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:43,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:43,399 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/ac28d40a110d454899960bb778952e7a 2024-11-22T15:22:43,406 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/104bd25252c745ef9dfe0f7add7a12d3 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/104bd25252c745ef9dfe0f7add7a12d3 2024-11-22T15:22:43,414 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/104bd25252c745ef9dfe0f7add7a12d3, entries=200, sequenceid=79, filesize=38.6 K 2024-11-22T15:22:43,418 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/4be0a8483c5941b18c60b8dfbe6c8ef8 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/4be0a8483c5941b18c60b8dfbe6c8ef8 2024-11-22T15:22:43,425 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/4be0a8483c5941b18c60b8dfbe6c8ef8, entries=150, sequenceid=79, filesize=11.7 K 2024-11-22T15:22:43,426 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/ac28d40a110d454899960bb778952e7a as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/ac28d40a110d454899960bb778952e7a 2024-11-22T15:22:43,436 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/ac28d40a110d454899960bb778952e7a, entries=150, sequenceid=79, filesize=11.7 K 2024-11-22T15:22:43,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-22T15:22:43,439 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for c30eef12e99d24ea8c4e5ace242daf20 in 1133ms, sequenceid=79, compaction requested=true 2024-11-22T15:22:43,439 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c30eef12e99d24ea8c4e5ace242daf20: 2024-11-22T15:22:43,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c30eef12e99d24ea8c4e5ace242daf20:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:22:43,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:43,439 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T15:22:43,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c30eef12e99d24ea8c4e5ace242daf20:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:22:43,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:43,439 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T15:22:43,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c30eef12e99d24ea8c4e5ace242daf20:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:22:43,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:22:43,444 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 
2024-11-22T15:22:43,444 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 141008 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T15:22:43,444 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): c30eef12e99d24ea8c4e5ace242daf20/A is initiating minor compaction (all files) 2024-11-22T15:22:43,444 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): c30eef12e99d24ea8c4e5ace242daf20/B is initiating minor compaction (all files) 2024-11-22T15:22:43,444 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c30eef12e99d24ea8c4e5ace242daf20/A in TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:43,444 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c30eef12e99d24ea8c4e5ace242daf20/B in TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:43,444 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/e16db852644540149b4cc2120ca0d78f, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/fdf803244299492db4cc0fed08af070e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/c6e06b0d962049ca9a127b1b6e2b6925, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/4be0a8483c5941b18c60b8dfbe6c8ef8] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp, totalSize=46.9 K 2024-11-22T15:22:43,444 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/bbe688e57dfb4609a0a84b4a198dca57, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/5522076e51604a3e8659c5868d24d567, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/7eef65639afb4899a3b69efc41b1291e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/104bd25252c745ef9dfe0f7add7a12d3] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp, totalSize=137.7 K 2024-11-22T15:22:43,444 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A 
region=TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:43,444 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. files: [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/bbe688e57dfb4609a0a84b4a198dca57, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/5522076e51604a3e8659c5868d24d567, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/7eef65639afb4899a3b69efc41b1291e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/104bd25252c745ef9dfe0f7add7a12d3] 2024-11-22T15:22:43,445 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting e16db852644540149b4cc2120ca0d78f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732288958731 2024-11-22T15:22:43,445 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting bbe688e57dfb4609a0a84b4a198dca57, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732288958731 2024-11-22T15:22:43,447 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5522076e51604a3e8659c5868d24d567, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732288958826 2024-11-22T15:22:43,447 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting fdf803244299492db4cc0fed08af070e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732288958828 2024-11-22T15:22:43,447 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting c6e06b0d962049ca9a127b1b6e2b6925, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732288959955 2024-11-22T15:22:43,447 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7eef65639afb4899a3b69efc41b1291e, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732288959955 2024-11-22T15:22:43,448 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 4be0a8483c5941b18c60b8dfbe6c8ef8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732288961158 2024-11-22T15:22:43,448 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 104bd25252c745ef9dfe0f7add7a12d3, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732288961150 2024-11-22T15:22:43,452 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:43,452 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 
2024-11-22T15:22:43,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:43,453 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2837): Flushing c30eef12e99d24ea8c4e5ace242daf20 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-22T15:22:43,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=A 2024-11-22T15:22:43,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:43,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=B 2024-11-22T15:22:43,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:43,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=C 2024-11-22T15:22:43,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:43,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:43,471 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. as already flushing 2024-11-22T15:22:43,479 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c30eef12e99d24ea8c4e5ace242daf20#B#compaction#158 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:43,479 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=c30eef12e99d24ea8c4e5ace242daf20] 2024-11-22T15:22:43,480 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/bdffba8f9c4640b7add7dc79a55cbfed is 50, key is test_row_0/B:col10/1732288961158/Put/seqid=0 2024-11-22T15:22:43,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112291398ce2d3bf457f8eb4bbd6680e1029_c30eef12e99d24ea8c4e5ace242daf20 is 50, key is test_row_0/A:col10/1732288962326/Put/seqid=0 2024-11-22T15:22:43,504 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112225ea04b42b67496bb933857732cc7d2a_c30eef12e99d24ea8c4e5ace242daf20 store=[table=TestAcidGuarantees family=A region=c30eef12e99d24ea8c4e5ace242daf20] 2024-11-22T15:22:43,510 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112225ea04b42b67496bb933857732cc7d2a_c30eef12e99d24ea8c4e5ace242daf20, store=[table=TestAcidGuarantees family=A region=c30eef12e99d24ea8c4e5ace242daf20] 2024-11-22T15:22:43,510 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112225ea04b42b67496bb933857732cc7d2a_c30eef12e99d24ea8c4e5ace242daf20 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=c30eef12e99d24ea8c4e5ace242daf20] 2024-11-22T15:22:43,553 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:43,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58702 deadline: 1732289023544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:43,553 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:43,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289023546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:43,553 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:43,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289023548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:43,569 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:43,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58678 deadline: 1732289023553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:43,569 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:43,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58688 deadline: 1732289023553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:43,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742005_1181 (size=12154) 2024-11-22T15:22:43,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742004_1180 (size=12139) 2024-11-22T15:22:43,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742006_1182 (size=4469) 2024-11-22T15:22:43,630 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c30eef12e99d24ea8c4e5ace242daf20#A#compaction#159 average throughput is 0.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:43,632 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/2b970bb950294ddba529e21c160e436a is 175, key is test_row_0/A:col10/1732288961158/Put/seqid=0 2024-11-22T15:22:43,667 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:43,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289023656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:43,668 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:43,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289023657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:43,667 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:43,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58702 deadline: 1732289023655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:43,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742007_1183 (size=31093) 2024-11-22T15:22:43,689 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:43,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58678 deadline: 1732289023670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:43,689 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:43,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58688 deadline: 1732289023671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:43,872 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:43,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289023869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:43,875 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:43,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289023871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:43,879 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:43,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58702 deadline: 1732289023873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:43,895 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:43,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58678 deadline: 1732289023891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:43,895 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:43,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58688 deadline: 1732289023892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:43,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-22T15:22:44,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:44,008 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112291398ce2d3bf457f8eb4bbd6680e1029_c30eef12e99d24ea8c4e5ace242daf20 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112291398ce2d3bf457f8eb4bbd6680e1029_c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:44,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/477acd5f1fd3424da03a1da7d9727f79, store: [table=TestAcidGuarantees family=A region=c30eef12e99d24ea8c4e5ace242daf20] 2024-11-22T15:22:44,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/477acd5f1fd3424da03a1da7d9727f79 is 175, key is test_row_0/A:col10/1732288962326/Put/seqid=0 2024-11-22T15:22:44,025 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/bdffba8f9c4640b7add7dc79a55cbfed as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/bdffba8f9c4640b7add7dc79a55cbfed 2024-11-22T15:22:44,035 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 
(all) file(s) in c30eef12e99d24ea8c4e5ace242daf20/B of c30eef12e99d24ea8c4e5ace242daf20 into bdffba8f9c4640b7add7dc79a55cbfed(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:22:44,035 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c30eef12e99d24ea8c4e5ace242daf20: 2024-11-22T15:22:44,035 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20., storeName=c30eef12e99d24ea8c4e5ace242daf20/B, priority=12, startTime=1732288963439; duration=0sec 2024-11-22T15:22:44,035 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:22:44,035 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c30eef12e99d24ea8c4e5ace242daf20:B 2024-11-22T15:22:44,035 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T15:22:44,037 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T15:22:44,038 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): c30eef12e99d24ea8c4e5ace242daf20/C is initiating minor compaction (all files) 2024-11-22T15:22:44,038 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c30eef12e99d24ea8c4e5ace242daf20/C in TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 
2024-11-22T15:22:44,038 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/e5144371e8ef45b88889289ed515fdb7, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/66cbc3c1e2eb433991bacc269d007ce5, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/1c996d0ab45a4858a8972c5fdf44165d, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/ac28d40a110d454899960bb778952e7a] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp, totalSize=46.9 K 2024-11-22T15:22:44,038 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting e5144371e8ef45b88889289ed515fdb7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732288958731 2024-11-22T15:22:44,039 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 66cbc3c1e2eb433991bacc269d007ce5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732288958828 2024-11-22T15:22:44,039 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 1c996d0ab45a4858a8972c5fdf44165d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732288959955 2024-11-22T15:22:44,039 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting ac28d40a110d454899960bb778952e7a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732288961158 2024-11-22T15:22:44,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742008_1184 (size=30955) 2024-11-22T15:22:44,062 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=87, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/477acd5f1fd3424da03a1da7d9727f79 2024-11-22T15:22:44,073 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c30eef12e99d24ea8c4e5ace242daf20#C#compaction#161 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:44,074 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/7b2226af89f14170a912d72111904c51 is 50, key is test_row_0/C:col10/1732288961158/Put/seqid=0 2024-11-22T15:22:44,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/44683ba1603a4af78a8114fffb424e07 is 50, key is test_row_0/B:col10/1732288962326/Put/seqid=0 2024-11-22T15:22:44,088 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/2b970bb950294ddba529e21c160e436a as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/2b970bb950294ddba529e21c160e436a 2024-11-22T15:22:44,098 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c30eef12e99d24ea8c4e5ace242daf20/A of c30eef12e99d24ea8c4e5ace242daf20 into 2b970bb950294ddba529e21c160e436a(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:22:44,098 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c30eef12e99d24ea8c4e5ace242daf20: 2024-11-22T15:22:44,098 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20., storeName=c30eef12e99d24ea8c4e5ace242daf20/A, priority=12, startTime=1732288963439; duration=0sec 2024-11-22T15:22:44,098 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:44,098 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c30eef12e99d24ea8c4e5ace242daf20:A 2024-11-22T15:22:44,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742009_1185 (size=12139) 2024-11-22T15:22:44,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742010_1186 (size=12001) 2024-11-22T15:22:44,176 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:44,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289024175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:44,186 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:44,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289024177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:44,191 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:44,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58702 deadline: 1732289024182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:44,203 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:44,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58688 deadline: 1732289024198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:44,205 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:44,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58678 deadline: 1732289024198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:44,554 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/7b2226af89f14170a912d72111904c51 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/7b2226af89f14170a912d72111904c51 2024-11-22T15:22:44,561 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c30eef12e99d24ea8c4e5ace242daf20/C of c30eef12e99d24ea8c4e5ace242daf20 into 7b2226af89f14170a912d72111904c51(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:22:44,561 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c30eef12e99d24ea8c4e5ace242daf20: 2024-11-22T15:22:44,562 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20., storeName=c30eef12e99d24ea8c4e5ace242daf20/C, priority=12, startTime=1732288963439; duration=0sec 2024-11-22T15:22:44,562 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:44,562 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c30eef12e99d24ea8c4e5ace242daf20:C 2024-11-22T15:22:44,570 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=87 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/44683ba1603a4af78a8114fffb424e07 2024-11-22T15:22:44,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/6cc2ebca5242437cbb848b89ff2cad78 is 50, key is test_row_0/C:col10/1732288962326/Put/seqid=0 2024-11-22T15:22:44,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742011_1187 (size=12001) 2024-11-22T15:22:44,683 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:44,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289024682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:44,694 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:44,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289024690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:44,697 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:44,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58702 deadline: 1732289024694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:44,711 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:44,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58678 deadline: 1732289024706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:44,715 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:44,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58688 deadline: 1732289024711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:44,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-22T15:22:45,033 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=87 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/6cc2ebca5242437cbb848b89ff2cad78 2024-11-22T15:22:45,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/477acd5f1fd3424da03a1da7d9727f79 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/477acd5f1fd3424da03a1da7d9727f79 2024-11-22T15:22:45,045 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/477acd5f1fd3424da03a1da7d9727f79, entries=150, sequenceid=87, filesize=30.2 K 2024-11-22T15:22:45,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/44683ba1603a4af78a8114fffb424e07 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/44683ba1603a4af78a8114fffb424e07 2024-11-22T15:22:45,053 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/44683ba1603a4af78a8114fffb424e07, entries=150, sequenceid=87, filesize=11.7 K 2024-11-22T15:22:45,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/6cc2ebca5242437cbb848b89ff2cad78 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/6cc2ebca5242437cbb848b89ff2cad78 2024-11-22T15:22:45,060 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/6cc2ebca5242437cbb848b89ff2cad78, entries=150, sequenceid=87, filesize=11.7 K 2024-11-22T15:22:45,061 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=167.72 KB/171750 for c30eef12e99d24ea8c4e5ace242daf20 in 1609ms, sequenceid=87, compaction requested=false 2024-11-22T15:22:45,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2538): Flush status journal for c30eef12e99d24ea8c4e5ace242daf20: 2024-11-22T15:22:45,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 
2024-11-22T15:22:45,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=50 2024-11-22T15:22:45,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=50 2024-11-22T15:22:45,063 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-11-22T15:22:45,064 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2270 sec 2024-11-22T15:22:45,066 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees in 2.2310 sec 2024-11-22T15:22:45,108 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-22T15:22:45,108 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-22T15:22:45,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:45,695 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c30eef12e99d24ea8c4e5ace242daf20 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-11-22T15:22:45,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=A 2024-11-22T15:22:45,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:45,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=B 2024-11-22T15:22:45,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:45,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=C 2024-11-22T15:22:45,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:45,713 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:45,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58702 deadline: 1732289025708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:45,717 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:45,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289025712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:45,717 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:45,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289025712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:45,721 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:45,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58678 deadline: 1732289025715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:45,723 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122ce3e3913927346bdb0a35470123b639f_c30eef12e99d24ea8c4e5ace242daf20 is 50, key is test_row_0/A:col10/1732288963524/Put/seqid=0 2024-11-22T15:22:45,726 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:45,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58688 deadline: 1732289025721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:45,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742012_1188 (size=14594) 2024-11-22T15:22:45,816 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:45,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58702 deadline: 1732289025815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:45,819 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:45,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289025819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:45,820 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:45,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289025819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:46,022 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:46,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58702 deadline: 1732289026019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:46,026 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:46,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289026022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:46,028 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:46,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289026027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:46,154 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,159 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122ce3e3913927346bdb0a35470123b639f_c30eef12e99d24ea8c4e5ace242daf20 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122ce3e3913927346bdb0a35470123b639f_c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:46,160 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/ba095335efa942b9b5d38d0ff5d20601, store: [table=TestAcidGuarantees family=A region=c30eef12e99d24ea8c4e5ace242daf20] 2024-11-22T15:22:46,161 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/ba095335efa942b9b5d38d0ff5d20601 is 175, key is test_row_0/A:col10/1732288963524/Put/seqid=0 2024-11-22T15:22:46,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742013_1189 (size=39549) 2024-11-22T15:22:46,201 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=119, memsize=58.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/ba095335efa942b9b5d38d0ff5d20601 2024-11-22T15:22:46,223 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/d8adb163832a4619b99d39cd7335cd82 is 50, key is test_row_0/B:col10/1732288963524/Put/seqid=0 2024-11-22T15:22:46,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742014_1190 
(size=12001) 2024-11-22T15:22:46,332 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:46,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58702 deadline: 1732289026325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:46,333 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:46,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289026327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:46,336 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:46,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289026332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:46,668 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/d8adb163832a4619b99d39cd7335cd82 2024-11-22T15:22:46,701 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/991173ab1c9b4afbb5b6654a0a7fb270 is 50, key is test_row_0/C:col10/1732288963524/Put/seqid=0 2024-11-22T15:22:46,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742015_1191 (size=12001) 2024-11-22T15:22:46,734 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/991173ab1c9b4afbb5b6654a0a7fb270 2024-11-22T15:22:46,741 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/ba095335efa942b9b5d38d0ff5d20601 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/ba095335efa942b9b5d38d0ff5d20601 2024-11-22T15:22:46,748 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/ba095335efa942b9b5d38d0ff5d20601, entries=200, sequenceid=119, filesize=38.6 K 2024-11-22T15:22:46,749 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/d8adb163832a4619b99d39cd7335cd82 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/d8adb163832a4619b99d39cd7335cd82 2024-11-22T15:22:46,753 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,754 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/d8adb163832a4619b99d39cd7335cd82, entries=150, sequenceid=119, filesize=11.7 K 2024-11-22T15:22:46,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,755 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/991173ab1c9b4afbb5b6654a0a7fb270 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/991173ab1c9b4afbb5b6654a0a7fb270 2024-11-22T15:22:46,764 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/991173ab1c9b4afbb5b6654a0a7fb270, entries=150, sequenceid=119, filesize=11.7 K 2024-11-22T15:22:46,765 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for c30eef12e99d24ea8c4e5ace242daf20 in 1069ms, sequenceid=119, compaction requested=true 2024-11-22T15:22:46,765 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c30eef12e99d24ea8c4e5ace242daf20: 2024-11-22T15:22:46,765 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c30eef12e99d24ea8c4e5ace242daf20:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:22:46,765 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:46,765 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:46,765 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c30eef12e99d24ea8c4e5ace242daf20:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:22:46,765 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:22:46,765 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c30eef12e99d24ea8c4e5ace242daf20:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:22:46,765 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-22T15:22:46,765 DEBUG 
[RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:46,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,766 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101597 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:46,766 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:46,766 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): c30eef12e99d24ea8c4e5ace242daf20/B is initiating minor compaction (all files) 2024-11-22T15:22:46,767 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c30eef12e99d24ea8c4e5ace242daf20/B in TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:46,767 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/bdffba8f9c4640b7add7dc79a55cbfed, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/44683ba1603a4af78a8114fffb424e07, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/d8adb163832a4619b99d39cd7335cd82] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp, totalSize=35.3 K 2024-11-22T15:22:46,767 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): c30eef12e99d24ea8c4e5ace242daf20/A is initiating minor compaction (all files) 2024-11-22T15:22:46,767 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c30eef12e99d24ea8c4e5ace242daf20/A in TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 
2024-11-22T15:22:46,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,767 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/2b970bb950294ddba529e21c160e436a, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/477acd5f1fd3424da03a1da7d9727f79, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/ba095335efa942b9b5d38d0ff5d20601] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp, totalSize=99.2 K 2024-11-22T15:22:46,767 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:46,767 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. files: [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/2b970bb950294ddba529e21c160e436a, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/477acd5f1fd3424da03a1da7d9727f79, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/ba095335efa942b9b5d38d0ff5d20601] 2024-11-22T15:22:46,767 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting bdffba8f9c4640b7add7dc79a55cbfed, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732288961158 2024-11-22T15:22:46,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,768 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 44683ba1603a4af78a8114fffb424e07, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1732288962317 2024-11-22T15:22:46,768 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2b970bb950294ddba529e21c160e436a, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732288961158 2024-11-22T15:22:46,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,768 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 477acd5f1fd3424da03a1da7d9727f79, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1732288962317 2024-11-22T15:22:46,768 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting d8adb163832a4619b99d39cd7335cd82, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732288963524 2024-11-22T15:22:46,768 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting ba095335efa942b9b5d38d0ff5d20601, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732288963524 2024-11-22T15:22:46,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,803 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized 
configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=c30eef12e99d24ea8c4e5ace242daf20] 2024-11-22T15:22:46,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,825 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c30eef12e99d24ea8c4e5ace242daf20#B#compaction#168 average throughput is 0.47 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:46,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,826 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/88068f038c5a4b8b91a245f3b12a8434 is 50, key is test_row_0/B:col10/1732288963524/Put/seqid=0 2024-11-22T15:22:46,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,833 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241122612e97267fff41438d6c9165132f5445_c30eef12e99d24ea8c4e5ace242daf20 store=[table=TestAcidGuarantees family=A region=c30eef12e99d24ea8c4e5ace242daf20] 2024-11-22T15:22:46,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:22:46,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:22:46,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,841 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241122612e97267fff41438d6c9165132f5445_c30eef12e99d24ea8c4e5ace242daf20, store=[table=TestAcidGuarantees family=A region=c30eef12e99d24ea8c4e5ace242daf20] 2024-11-22T15:22:46,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,841 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122612e97267fff41438d6c9165132f5445_c30eef12e99d24ea8c4e5ace242daf20 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=c30eef12e99d24ea8c4e5ace242daf20] 2024-11-22T15:22:46,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742016_1192 (size=12241) 2024-11-22T15:22:46,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:22:46,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,873 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/88068f038c5a4b8b91a245f3b12a8434 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/88068f038c5a4b8b91a245f3b12a8434 2024-11-22T15:22:46,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742017_1193 (size=4469) 2024-11-22T15:22:46,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:46,917 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c30eef12e99d24ea8c4e5ace242daf20 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-22T15:22:46,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=A 2024-11-22T15:22:46,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:46,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=B 2024-11-22T15:22:46,918 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:46,918 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=C 2024-11-22T15:22:46,918 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:46,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,924 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c30eef12e99d24ea8c4e5ace242daf20/B of c30eef12e99d24ea8c4e5ace242daf20 into 88068f038c5a4b8b91a245f3b12a8434(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:22:46,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,924 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c30eef12e99d24ea8c4e5ace242daf20: 2024-11-22T15:22:46,924 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20., storeName=c30eef12e99d24ea8c4e5ace242daf20/B, priority=13, startTime=1732288966765; duration=0sec 2024-11-22T15:22:46,924 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:22:46,924 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c30eef12e99d24ea8c4e5ace242daf20:B 2024-11-22T15:22:46,924 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:46,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,928 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:46,928 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): c30eef12e99d24ea8c4e5ace242daf20/C is initiating minor compaction (all files) 2024-11-22T15:22:46,928 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c30eef12e99d24ea8c4e5ace242daf20/C in TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 
2024-11-22T15:22:46,928 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/7b2226af89f14170a912d72111904c51, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/6cc2ebca5242437cbb848b89ff2cad78, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/991173ab1c9b4afbb5b6654a0a7fb270] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp, totalSize=35.3 K 2024-11-22T15:22:46,928 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 7b2226af89f14170a912d72111904c51, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732288961158 2024-11-22T15:22:46,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,929 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 6cc2ebca5242437cbb848b89ff2cad78, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1732288962317 2024-11-22T15:22:46,929 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 991173ab1c9b4afbb5b6654a0a7fb270, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732288963524 2024-11-22T15:22:46,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:46,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-22T15:22:46,942 INFO [Thread-789 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 49 completed 2024-11-22T15:22:46,947 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:22:46,950 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122ad689db6d84140cca091093694d58be5_c30eef12e99d24ea8c4e5ace242daf20 is 50, key is test_row_0/A:col10/1732288966913/Put/seqid=0 2024-11-22T15:22:46,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees 2024-11-22T15:22:46,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-22T15:22:46,952 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=51, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:22:46,952 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:22:46,952 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:22:46,953 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c30eef12e99d24ea8c4e5ace242daf20#C#compaction#169 average throughput is 0.60 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:46,954 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/8814a9375fee47e0a6c8d9abb0972bf5 is 50, key is test_row_0/C:col10/1732288963524/Put/seqid=0 2024-11-22T15:22:46,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742019_1195 (size=12241) 2024-11-22T15:22:46,990 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/8814a9375fee47e0a6c8d9abb0972bf5 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/8814a9375fee47e0a6c8d9abb0972bf5 2024-11-22T15:22:46,998 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c30eef12e99d24ea8c4e5ace242daf20/C of c30eef12e99d24ea8c4e5ace242daf20 into 8814a9375fee47e0a6c8d9abb0972bf5(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:22:46,998 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c30eef12e99d24ea8c4e5ace242daf20: 2024-11-22T15:22:46,998 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20., storeName=c30eef12e99d24ea8c4e5ace242daf20/C, priority=13, startTime=1732288966765; duration=0sec 2024-11-22T15:22:46,998 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:46,998 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c30eef12e99d24ea8c4e5ace242daf20:C 2024-11-22T15:22:47,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742018_1194 (size=19574) 2024-11-22T15:22:47,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-22T15:22:47,104 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:47,105 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-22T15:22:47,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:47,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. as already flushing 2024-11-22T15:22:47,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:47,105 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:22:47,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:47,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:47,119 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:47,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289027105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:47,119 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:47,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289027106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:47,120 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:47,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58702 deadline: 1732289027113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:47,224 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:47,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58702 deadline: 1732289027221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:47,224 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:47,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289027223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:47,228 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:47,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289027226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:47,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-22T15:22:47,257 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:47,258 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-22T15:22:47,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:47,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. as already flushing 2024-11-22T15:22:47,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:47,259 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:47,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:47,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:47,308 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c30eef12e99d24ea8c4e5ace242daf20#A#compaction#167 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:47,309 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/f6501987ec2d46a798d0481f92454973 is 175, key is test_row_0/A:col10/1732288963524/Put/seqid=0 2024-11-22T15:22:47,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742020_1196 (size=31195) 2024-11-22T15:22:47,356 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/f6501987ec2d46a798d0481f92454973 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/f6501987ec2d46a798d0481f92454973 2024-11-22T15:22:47,362 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c30eef12e99d24ea8c4e5ace242daf20/A of c30eef12e99d24ea8c4e5ace242daf20 into f6501987ec2d46a798d0481f92454973(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:22:47,362 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c30eef12e99d24ea8c4e5ace242daf20: 2024-11-22T15:22:47,362 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20., storeName=c30eef12e99d24ea8c4e5ace242daf20/A, priority=13, startTime=1732288966765; duration=0sec 2024-11-22T15:22:47,362 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:47,363 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c30eef12e99d24ea8c4e5ace242daf20:A 2024-11-22T15:22:47,416 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:47,421 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:47,423 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-22T15:22:47,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:47,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. as already flushing 2024-11-22T15:22:47,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:47,423 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:22:47,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:47,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:47,427 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122ad689db6d84140cca091093694d58be5_c30eef12e99d24ea8c4e5ace242daf20 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122ad689db6d84140cca091093694d58be5_c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:47,428 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/730423c297854debaed2d150e43ba385, store: [table=TestAcidGuarantees family=A region=c30eef12e99d24ea8c4e5ace242daf20] 2024-11-22T15:22:47,428 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/730423c297854debaed2d150e43ba385 is 175, key is test_row_0/A:col10/1732288966913/Put/seqid=0 2024-11-22T15:22:47,429 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:47,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58702 deadline: 1732289027426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:47,432 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:47,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289027426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:47,432 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:47,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289027429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:47,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742021_1197 (size=56833) 2024-11-22T15:22:47,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-22T15:22:47,575 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:47,583 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-22T15:22:47,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:47,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. as already flushing 2024-11-22T15:22:47,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:47,583 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:47,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:47,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:47,736 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:47,737 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:47,737 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-22T15:22:47,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58702 deadline: 1732289027734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:47,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:47,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. as already flushing 2024-11-22T15:22:47,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:47,738 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:47,738 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:47,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:47,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289027734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:47,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:47,741 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:47,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289027738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:47,746 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:47,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58688 deadline: 1732289027739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:47,746 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:47,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58678 deadline: 1732289027740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:47,747 DEBUG [Thread-787 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4193 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20., hostname=77927f992d0b,36033,1732288915809, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T15:22:47,747 DEBUG [Thread-783 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4194 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, 
regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20., hostname=77927f992d0b,36033,1732288915809, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) 
at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T15:22:47,879 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=130, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/730423c297854debaed2d150e43ba385 2024-11-22T15:22:47,890 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:47,891 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-22T15:22:47,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:47,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. as already flushing 2024-11-22T15:22:47,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 
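Annotation (not part of the captured log): the entries above show client puts being rejected with RegionTooBusyException ("Over memstore limit=512.0 K") while RpcRetryingCallerImpl keeps retrying (tries=6, retries=16, started=4194 ms ago). The sketch below is a hypothetical illustration of how a client in this situation could tighten its retry behaviour; the table, row, family, and qualifier names are taken from the log, the config values are arbitrary, and only standard HBase client APIs and configuration keys are used.

// Hypothetical sketch: tune client-side retries for writes that may hit
// RegionTooBusyException while the region's memstore is over its blocking limit.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryTuningSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Fewer, faster retries than the defaults visible above (retries=16).
    conf.setInt("hbase.client.retries.number", 5);
    conf.setLong("hbase.client.pause", 100);    // pause between retries, ms
    conf.setLong("hbase.rpc.timeout", 10_000);  // per-RPC timeout, ms
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_1"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // Retried internally on RegionTooBusyException; fails with an IOException
      // once the configured retries are exhausted.
      table.put(put);
    }
  }
}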
2024-11-22T15:22:47,892 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:47,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:47,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:47,899 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/9234b8852b4946edab595e3e4cbd54e4 is 50, key is test_row_0/B:col10/1732288966913/Put/seqid=0 2024-11-22T15:22:47,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742022_1198 (size=12101) 2024-11-22T15:22:47,956 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/9234b8852b4946edab595e3e4cbd54e4 2024-11-22T15:22:47,974 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/2428507d4d3b44239c68b56f5a34593a is 50, key is test_row_0/C:col10/1732288966913/Put/seqid=0 2024-11-22T15:22:48,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742023_1199 (size=12101) 2024-11-22T15:22:48,036 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/2428507d4d3b44239c68b56f5a34593a 2024-11-22T15:22:48,043 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/730423c297854debaed2d150e43ba385 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/730423c297854debaed2d150e43ba385 2024-11-22T15:22:48,045 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:48,051 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-22T15:22:48,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:48,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. as already flushing 2024-11-22T15:22:48,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:48,051 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:48,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:48,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:22:48,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-22T15:22:48,058 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/730423c297854debaed2d150e43ba385, entries=300, sequenceid=130, filesize=55.5 K 2024-11-22T15:22:48,060 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/9234b8852b4946edab595e3e4cbd54e4 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/9234b8852b4946edab595e3e4cbd54e4 2024-11-22T15:22:48,069 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/9234b8852b4946edab595e3e4cbd54e4, entries=150, sequenceid=130, filesize=11.8 K 2024-11-22T15:22:48,074 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/2428507d4d3b44239c68b56f5a34593a as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/2428507d4d3b44239c68b56f5a34593a 2024-11-22T15:22:48,082 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/2428507d4d3b44239c68b56f5a34593a, entries=150, sequenceid=130, filesize=11.8 K 2024-11-22T15:22:48,084 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for c30eef12e99d24ea8c4e5ace242daf20 in 1167ms, sequenceid=130, compaction requested=false 2024-11-22T15:22:48,085 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c30eef12e99d24ea8c4e5ace242daf20: 2024-11-22T15:22:48,203 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:48,204 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-22T15:22:48,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 
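Annotation (not part of the captured log): the entries above show the MemStoreFlusher committing the .tmp store files into the A, B, and C family directories and finishing the flush, while the master's flush procedure (pid=51/52) is re-dispatched to the region server. As a hypothetical illustration, the same kind of table flush can be requested from a client through the Admin API; the table name is taken from the log and only standard HBase client calls are used.

// Hypothetical sketch: request a flush of a table's memstores via the Admin API.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Requests that the memstores of every region of the table be flushed to HFiles;
      // regions that are already flushing (as logged above) complete on a later attempt.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}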
2024-11-22T15:22:48,204 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2837): Flushing c30eef12e99d24ea8c4e5ace242daf20 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-22T15:22:48,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=A 2024-11-22T15:22:48,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:48,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=B 2024-11-22T15:22:48,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:48,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=C 2024-11-22T15:22:48,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:48,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112248958d2a72ee48c5b3d6e83c8a955249_c30eef12e99d24ea8c4e5ace242daf20 is 50, key is test_row_0/A:col10/1732288967095/Put/seqid=0 2024-11-22T15:22:48,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:48,244 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. as already flushing 2024-11-22T15:22:48,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742024_1200 (size=12304) 2024-11-22T15:22:48,308 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:48,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289028284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:48,313 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:48,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289028308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:48,313 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:48,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58702 deadline: 1732289028309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:48,411 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:48,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289028410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:48,421 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:48,421 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:48,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289028414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:48,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58702 deadline: 1732289028415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:48,613 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:48,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289028612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:48,624 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:48,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58702 deadline: 1732289028622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:48,625 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:48,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289028623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:48,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:48,686 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112248958d2a72ee48c5b3d6e83c8a955249_c30eef12e99d24ea8c4e5ace242daf20 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112248958d2a72ee48c5b3d6e83c8a955249_c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:48,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/a805230534c34ba7bf1b71d5e5feb53e, store: [table=TestAcidGuarantees family=A region=c30eef12e99d24ea8c4e5ace242daf20] 2024-11-22T15:22:48,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/a805230534c34ba7bf1b71d5e5feb53e is 175, key is test_row_0/A:col10/1732288967095/Put/seqid=0 2024-11-22T15:22:48,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742025_1201 (size=31105) 2024-11-22T15:22:48,721 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=158, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/a805230534c34ba7bf1b71d5e5feb53e 2024-11-22T15:22:48,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/66c3ac0aeecb4292b3a6344c7fb7fbb6 is 50, key is test_row_0/B:col10/1732288967095/Put/seqid=0 2024-11-22T15:22:48,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742026_1202 (size=12151) 2024-11-22T15:22:48,804 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/66c3ac0aeecb4292b3a6344c7fb7fbb6 2024-11-22T15:22:48,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/e1f7fd9908f74e8a85c9ce23f5cfe275 is 50, key is test_row_0/C:col10/1732288967095/Put/seqid=0 2024-11-22T15:22:48,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742027_1203 (size=12151) 2024-11-22T15:22:48,916 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:48,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289028916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:48,929 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:48,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58702 deadline: 1732289028926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:48,932 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:48,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289028928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:49,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-22T15:22:49,273 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/e1f7fd9908f74e8a85c9ce23f5cfe275 2024-11-22T15:22:49,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/a805230534c34ba7bf1b71d5e5feb53e as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/a805230534c34ba7bf1b71d5e5feb53e 2024-11-22T15:22:49,292 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/a805230534c34ba7bf1b71d5e5feb53e, entries=150, sequenceid=158, filesize=30.4 K 2024-11-22T15:22:49,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/66c3ac0aeecb4292b3a6344c7fb7fbb6 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/66c3ac0aeecb4292b3a6344c7fb7fbb6 2024-11-22T15:22:49,301 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/66c3ac0aeecb4292b3a6344c7fb7fbb6, entries=150, sequenceid=158, filesize=11.9 K 2024-11-22T15:22:49,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/e1f7fd9908f74e8a85c9ce23f5cfe275 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/e1f7fd9908f74e8a85c9ce23f5cfe275 2024-11-22T15:22:49,320 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/e1f7fd9908f74e8a85c9ce23f5cfe275, entries=150, sequenceid=158, filesize=11.9 K 2024-11-22T15:22:49,321 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for c30eef12e99d24ea8c4e5ace242daf20 in 1117ms, sequenceid=158, compaction requested=true 2024-11-22T15:22:49,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2538): Flush status journal for c30eef12e99d24ea8c4e5ace242daf20: 2024-11-22T15:22:49,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:49,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-11-22T15:22:49,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=52 2024-11-22T15:22:49,324 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=51 2024-11-22T15:22:49,324 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3700 sec 2024-11-22T15:22:49,325 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees in 2.3770 sec 2024-11-22T15:22:49,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:49,428 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c30eef12e99d24ea8c4e5ace242daf20 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-22T15:22:49,428 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=A 2024-11-22T15:22:49,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:49,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=B 2024-11-22T15:22:49,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:49,429 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=C 2024-11-22T15:22:49,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:49,449 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122e9259034197f4817a83ec34c132dfba4_c30eef12e99d24ea8c4e5ace242daf20 is 50, key is test_row_0/A:col10/1732288969424/Put/seqid=0 2024-11-22T15:22:49,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742028_1204 (size=14794) 2024-11-22T15:22:49,497 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:49,499 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:49,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289029492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:49,503 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122e9259034197f4817a83ec34c132dfba4_c30eef12e99d24ea8c4e5ace242daf20 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122e9259034197f4817a83ec34c132dfba4_c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:49,504 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/9db998a0c8af45fd9bbb44ff5296d0e5, store: [table=TestAcidGuarantees family=A region=c30eef12e99d24ea8c4e5ace242daf20] 2024-11-22T15:22:49,505 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/9db998a0c8af45fd9bbb44ff5296d0e5 is 175, key is test_row_0/A:col10/1732288969424/Put/seqid=0 2024-11-22T15:22:49,509 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:49,509 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:49,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289029499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:49,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58702 deadline: 1732289029497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:49,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742029_1205 (size=39749) 2024-11-22T15:22:49,602 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:49,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289029600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:49,616 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:49,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58702 deadline: 1732289029610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:49,616 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:49,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289029611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:49,808 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:49,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289029804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:49,820 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:49,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289029817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:49,824 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:49,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58702 deadline: 1732289029818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:49,945 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=170, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/9db998a0c8af45fd9bbb44ff5296d0e5 2024-11-22T15:22:49,960 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/bd4d6d77076a447e8848a881a8195eae is 50, key is test_row_0/B:col10/1732288969424/Put/seqid=0 2024-11-22T15:22:50,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742030_1206 (size=12151) 2024-11-22T15:22:50,116 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:50,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289030112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:50,123 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:50,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289030122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:50,130 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:50,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58702 deadline: 1732289030125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:50,444 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/bd4d6d77076a447e8848a881a8195eae 2024-11-22T15:22:50,454 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/8baa17766bb447fbb3f0b71442724ce1 is 50, key is test_row_0/C:col10/1732288969424/Put/seqid=0 2024-11-22T15:22:50,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742031_1207 (size=12151) 2024-11-22T15:22:50,622 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:50,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289030620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:50,632 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:50,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289030629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:50,641 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:50,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58702 deadline: 1732289030636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:50,900 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/8baa17766bb447fbb3f0b71442724ce1 2024-11-22T15:22:50,909 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/9db998a0c8af45fd9bbb44ff5296d0e5 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/9db998a0c8af45fd9bbb44ff5296d0e5 2024-11-22T15:22:50,913 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/9db998a0c8af45fd9bbb44ff5296d0e5, entries=200, sequenceid=170, filesize=38.8 K 2024-11-22T15:22:50,915 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/bd4d6d77076a447e8848a881a8195eae as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/bd4d6d77076a447e8848a881a8195eae 2024-11-22T15:22:50,923 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/bd4d6d77076a447e8848a881a8195eae, entries=150, sequenceid=170, filesize=11.9 K 2024-11-22T15:22:50,927 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/8baa17766bb447fbb3f0b71442724ce1 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/8baa17766bb447fbb3f0b71442724ce1 2024-11-22T15:22:50,935 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/8baa17766bb447fbb3f0b71442724ce1, entries=150, sequenceid=170, filesize=11.9 K 2024-11-22T15:22:50,940 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for c30eef12e99d24ea8c4e5ace242daf20 in 1511ms, sequenceid=170, compaction requested=true 2024-11-22T15:22:50,940 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c30eef12e99d24ea8c4e5ace242daf20: 2024-11-22T15:22:50,940 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T15:22:50,942 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 158882 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T15:22:50,942 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): c30eef12e99d24ea8c4e5ace242daf20/A is initiating minor compaction (all files) 2024-11-22T15:22:50,943 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c30eef12e99d24ea8c4e5ace242daf20/A in TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:50,943 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/f6501987ec2d46a798d0481f92454973, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/730423c297854debaed2d150e43ba385, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/a805230534c34ba7bf1b71d5e5feb53e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/9db998a0c8af45fd9bbb44ff5296d0e5] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp, totalSize=155.2 K 2024-11-22T15:22:50,943 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 
2024-11-22T15:22:50,943 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. files: [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/f6501987ec2d46a798d0481f92454973, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/730423c297854debaed2d150e43ba385, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/a805230534c34ba7bf1b71d5e5feb53e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/9db998a0c8af45fd9bbb44ff5296d0e5] 2024-11-22T15:22:50,943 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting f6501987ec2d46a798d0481f92454973, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732288963524 2024-11-22T15:22:50,944 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 730423c297854debaed2d150e43ba385, keycount=300, bloomtype=ROW, size=55.5 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732288965704 2024-11-22T15:22:50,944 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting a805230534c34ba7bf1b71d5e5feb53e, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732288967095 2024-11-22T15:22:50,944 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9db998a0c8af45fd9bbb44ff5296d0e5, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732288968251 2024-11-22T15:22:50,953 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c30eef12e99d24ea8c4e5ace242daf20:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:22:50,953 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:50,953 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T15:22:50,955 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48644 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T15:22:50,955 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): c30eef12e99d24ea8c4e5ace242daf20/B is initiating minor compaction (all files) 2024-11-22T15:22:50,955 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c30eef12e99d24ea8c4e5ace242daf20/B in TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 
2024-11-22T15:22:50,955 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/88068f038c5a4b8b91a245f3b12a8434, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/9234b8852b4946edab595e3e4cbd54e4, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/66c3ac0aeecb4292b3a6344c7fb7fbb6, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/bd4d6d77076a447e8848a881a8195eae] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp, totalSize=47.5 K 2024-11-22T15:22:50,955 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 88068f038c5a4b8b91a245f3b12a8434, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732288963524 2024-11-22T15:22:50,956 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 9234b8852b4946edab595e3e4cbd54e4, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732288966912 2024-11-22T15:22:50,956 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 66c3ac0aeecb4292b3a6344c7fb7fbb6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732288967095 2024-11-22T15:22:50,956 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting bd4d6d77076a447e8848a881a8195eae, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732288968262 2024-11-22T15:22:50,960 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=c30eef12e99d24ea8c4e5ace242daf20] 2024-11-22T15:22:50,965 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c30eef12e99d24ea8c4e5ace242daf20:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:22:50,965 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:50,965 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c30eef12e99d24ea8c4e5ace242daf20:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:22:50,965 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:22:50,968 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241122568cbff942eb40e3b1cb367f9ac5bfb3_c30eef12e99d24ea8c4e5ace242daf20 store=[table=TestAcidGuarantees family=A 
region=c30eef12e99d24ea8c4e5ace242daf20] 2024-11-22T15:22:50,972 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241122568cbff942eb40e3b1cb367f9ac5bfb3_c30eef12e99d24ea8c4e5ace242daf20, store=[table=TestAcidGuarantees family=A region=c30eef12e99d24ea8c4e5ace242daf20] 2024-11-22T15:22:50,972 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122568cbff942eb40e3b1cb367f9ac5bfb3_c30eef12e99d24ea8c4e5ace242daf20 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=c30eef12e99d24ea8c4e5ace242daf20] 2024-11-22T15:22:50,981 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c30eef12e99d24ea8c4e5ace242daf20#B#compaction#180 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:50,982 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/6308d16ce77b4244a728032a5a2cf352 is 50, key is test_row_0/B:col10/1732288969424/Put/seqid=0 2024-11-22T15:22:51,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742032_1208 (size=4469) 2024-11-22T15:22:51,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742033_1209 (size=12527) 2024-11-22T15:22:51,019 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c30eef12e99d24ea8c4e5ace242daf20#A#compaction#179 average throughput is 0.41 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:51,020 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/65bb600c18554f95a697b34f2179e93e is 175, key is test_row_0/A:col10/1732288969424/Put/seqid=0 2024-11-22T15:22:51,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742034_1210 (size=31481) 2024-11-22T15:22:51,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-22T15:22:51,060 INFO [Thread-789 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 51 completed 2024-11-22T15:22:51,062 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:22:51,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees 2024-11-22T15:22:51,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-22T15:22:51,064 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:22:51,064 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:22:51,064 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:22:51,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-22T15:22:51,218 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:51,218 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-22T15:22:51,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 
2024-11-22T15:22:51,218 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2837): Flushing c30eef12e99d24ea8c4e5ace242daf20 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-22T15:22:51,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=A 2024-11-22T15:22:51,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:51,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=B 2024-11-22T15:22:51,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:51,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=C 2024-11-22T15:22:51,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:51,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122d03c496966a04a6db87962ba36225466_c30eef12e99d24ea8c4e5ace242daf20 is 50, key is test_row_0/A:col10/1732288969490/Put/seqid=0 2024-11-22T15:22:51,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742035_1211 (size=12304) 2024-11-22T15:22:51,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:51,293 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122d03c496966a04a6db87962ba36225466_c30eef12e99d24ea8c4e5ace242daf20 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122d03c496966a04a6db87962ba36225466_c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:51,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/dfca9b4c51724b9593d798a270c89ca3, store: [table=TestAcidGuarantees family=A region=c30eef12e99d24ea8c4e5ace242daf20] 2024-11-22T15:22:51,302 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/dfca9b4c51724b9593d798a270c89ca3 is 175, key is test_row_0/A:col10/1732288969490/Put/seqid=0 2024-11-22T15:22:51,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742036_1212 (size=31105) 2024-11-22T15:22:51,345 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=194, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/dfca9b4c51724b9593d798a270c89ca3 2024-11-22T15:22:51,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/473b6c69adf24b1993b3e44b1f6a040c is 50, key is test_row_0/B:col10/1732288969490/Put/seqid=0 2024-11-22T15:22:51,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-22T15:22:51,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742037_1213 (size=12151) 2024-11-22T15:22:51,405 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/473b6c69adf24b1993b3e44b1f6a040c 2024-11-22T15:22:51,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/a65ec6b917d44c5da5d3f58d72710b7b is 50, key is test_row_0/C:col10/1732288969490/Put/seqid=0 2024-11-22T15:22:51,430 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/6308d16ce77b4244a728032a5a2cf352 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/6308d16ce77b4244a728032a5a2cf352 2024-11-22T15:22:51,437 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c30eef12e99d24ea8c4e5ace242daf20/B of c30eef12e99d24ea8c4e5ace242daf20 into 6308d16ce77b4244a728032a5a2cf352(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:22:51,437 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c30eef12e99d24ea8c4e5ace242daf20: 2024-11-22T15:22:51,437 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20., storeName=c30eef12e99d24ea8c4e5ace242daf20/B, priority=12, startTime=1732288970953; duration=0sec 2024-11-22T15:22:51,438 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:22:51,438 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c30eef12e99d24ea8c4e5ace242daf20:B 2024-11-22T15:22:51,438 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T15:22:51,440 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48644 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T15:22:51,440 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): c30eef12e99d24ea8c4e5ace242daf20/C is initiating minor compaction (all files) 2024-11-22T15:22:51,440 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c30eef12e99d24ea8c4e5ace242daf20/C in TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:51,440 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/8814a9375fee47e0a6c8d9abb0972bf5, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/2428507d4d3b44239c68b56f5a34593a, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/e1f7fd9908f74e8a85c9ce23f5cfe275, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/8baa17766bb447fbb3f0b71442724ce1] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp, totalSize=47.5 K 2024-11-22T15:22:51,441 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 8814a9375fee47e0a6c8d9abb0972bf5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732288963524 2024-11-22T15:22:51,442 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 2428507d4d3b44239c68b56f5a34593a, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732288966912 2024-11-22T15:22:51,442 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting e1f7fd9908f74e8a85c9ce23f5cfe275, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=158, earliestPutTs=1732288967095 2024-11-22T15:22:51,443 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 8baa17766bb447fbb3f0b71442724ce1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732288968262 2024-11-22T15:22:51,459 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/65bb600c18554f95a697b34f2179e93e as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/65bb600c18554f95a697b34f2179e93e 2024-11-22T15:22:51,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742038_1214 (size=12151) 2024-11-22T15:22:51,469 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c30eef12e99d24ea8c4e5ace242daf20/A of c30eef12e99d24ea8c4e5ace242daf20 into 65bb600c18554f95a697b34f2179e93e(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:22:51,469 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c30eef12e99d24ea8c4e5ace242daf20: 2024-11-22T15:22:51,469 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20., storeName=c30eef12e99d24ea8c4e5ace242daf20/A, priority=12, startTime=1732288970940; duration=0sec 2024-11-22T15:22:51,469 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:51,469 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c30eef12e99d24ea8c4e5ace242daf20:A 2024-11-22T15:22:51,472 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c30eef12e99d24ea8c4e5ace242daf20#C#compaction#184 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:51,473 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/99405dcbd41a4d0da33d483fe8df1fe7 is 50, key is test_row_0/C:col10/1732288969424/Put/seqid=0 2024-11-22T15:22:51,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742039_1215 (size=12527) 2024-11-22T15:22:51,522 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/99405dcbd41a4d0da33d483fe8df1fe7 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/99405dcbd41a4d0da33d483fe8df1fe7 2024-11-22T15:22:51,529 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c30eef12e99d24ea8c4e5ace242daf20/C of c30eef12e99d24ea8c4e5ace242daf20 into 99405dcbd41a4d0da33d483fe8df1fe7(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:22:51,529 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c30eef12e99d24ea8c4e5ace242daf20: 2024-11-22T15:22:51,529 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20., storeName=c30eef12e99d24ea8c4e5ace242daf20/C, priority=12, startTime=1732288970965; duration=0sec 2024-11-22T15:22:51,529 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:51,529 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c30eef12e99d24ea8c4e5ace242daf20:C 2024-11-22T15:22:51,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:51,631 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. as already flushing 2024-11-22T15:22:51,653 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:51,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289031650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:51,655 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:51,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289031652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:51,656 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:51,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58702 deadline: 1732289031652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:51,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-22T15:22:51,767 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:51,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289031766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:51,767 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:51,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58688 deadline: 1732289031766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:51,768 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:51,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289031766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:51,768 DEBUG [Thread-787 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8215 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20., hostname=77927f992d0b,36033,1732288915809, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T15:22:51,771 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:51,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58678 deadline: 1732289031771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:51,772 DEBUG [Thread-783 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8218 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20., hostname=77927f992d0b,36033,1732288915809, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T15:22:51,867 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/a65ec6b917d44c5da5d3f58d72710b7b 2024-11-22T15:22:51,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/dfca9b4c51724b9593d798a270c89ca3 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/dfca9b4c51724b9593d798a270c89ca3 2024-11-22T15:22:51,880 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/dfca9b4c51724b9593d798a270c89ca3, entries=150, sequenceid=194, filesize=30.4 K 2024-11-22T15:22:51,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/473b6c69adf24b1993b3e44b1f6a040c as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/473b6c69adf24b1993b3e44b1f6a040c 2024-11-22T15:22:51,889 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/473b6c69adf24b1993b3e44b1f6a040c, entries=150, sequenceid=194, filesize=11.9 K 2024-11-22T15:22:51,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/a65ec6b917d44c5da5d3f58d72710b7b as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/a65ec6b917d44c5da5d3f58d72710b7b 2024-11-22T15:22:51,903 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/a65ec6b917d44c5da5d3f58d72710b7b, entries=150, sequenceid=194, filesize=11.9 K 2024-11-22T15:22:51,904 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(3040): 
Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for c30eef12e99d24ea8c4e5ace242daf20 in 686ms, sequenceid=194, compaction requested=false 2024-11-22T15:22:51,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2538): Flush status journal for c30eef12e99d24ea8c4e5ace242daf20: 2024-11-22T15:22:51,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:51,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-11-22T15:22:51,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=54 2024-11-22T15:22:51,908 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53 2024-11-22T15:22:51,908 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 842 msec 2024-11-22T15:22:51,911 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees in 847 msec 2024-11-22T15:22:51,973 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c30eef12e99d24ea8c4e5ace242daf20 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-22T15:22:51,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=A 2024-11-22T15:22:51,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:51,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=B 2024-11-22T15:22:51,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:51,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=C 2024-11-22T15:22:51,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:51,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:51,992 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122b0bcb597a8e542e4be6feae65877c5d3_c30eef12e99d24ea8c4e5ace242daf20 is 50, key is test_row_0/A:col10/1732288971648/Put/seqid=0 2024-11-22T15:22:52,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742040_1216 (size=17284) 2024-11-22T15:22:52,050 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:52,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289032046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:52,052 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:52,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289032048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:52,157 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:52,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289032153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:52,158 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:52,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289032155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:52,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-22T15:22:52,169 INFO [Thread-789 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 53 completed 2024-11-22T15:22:52,170 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:22:52,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees 2024-11-22T15:22:52,172 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:22:52,172 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:22:52,173 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:22:52,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-22T15:22:52,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-22T15:22:52,325 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:52,325 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-22T15:22:52,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:52,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. as already flushing 2024-11-22T15:22:52,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:52,326 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:52,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
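The repeated RegionTooBusyException above is thrown by HRegion.checkResources once the region's memstore exceeds its blocking limit (here 512.0 K); the blocking client retries it internally, which is what the RpcRetryingCallerImpl lines (tries=7, retries=16, growing pause) record. A minimal Java sketch of a writer that exercises this path, assuming the standard HBase client; the retry settings, qualifier, and cell value are illustrative and are not the values used by AcidGuaranteesTestTool:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionWriteExample {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // The retry behaviour visible in the log is governed by these client
        // settings; the values below are illustrative only.
        conf.setInt("hbase.client.retries.number", 16);
        conf.setLong("hbase.client.pause", 100);                // base pause between retries, ms
        conf.setLong("hbase.client.operation.timeout", 120_000); // overall cap per operation, ms

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_1"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          // RegionTooBusyException is retried internally by the blocking client;
          // the put only fails to the caller once the configured retries are exhausted.
          table.put(put);
        }
      }
    }
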
2024-11-22T15:22:52,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:52,362 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:52,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289032359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:52,362 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:52,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289032360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:52,445 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:52,450 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122b0bcb597a8e542e4be6feae65877c5d3_c30eef12e99d24ea8c4e5ace242daf20 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122b0bcb597a8e542e4be6feae65877c5d3_c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:52,456 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/3c643a7fb711440ca19dd9d5fd93ad19, store: [table=TestAcidGuarantees family=A region=c30eef12e99d24ea8c4e5ace242daf20] 2024-11-22T15:22:52,457 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/3c643a7fb711440ca19dd9d5fd93ad19 is 175, key is test_row_0/A:col10/1732288971648/Put/seqid=0 2024-11-22T15:22:52,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-22T15:22:52,478 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:52,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742041_1217 (size=48389) 2024-11-22T15:22:52,478 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-22T15:22:52,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 
2024-11-22T15:22:52,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. as already flushing 2024-11-22T15:22:52,479 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=211, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/3c643a7fb711440ca19dd9d5fd93ad19 2024-11-22T15:22:52,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:52,479 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:52,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:22:52,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:22:52,512 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/49c606cf47844c98b58d3db2ee0969ae is 50, key is test_row_0/B:col10/1732288971648/Put/seqid=0 2024-11-22T15:22:52,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742042_1218 (size=12151) 2024-11-22T15:22:52,555 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/49c606cf47844c98b58d3db2ee0969ae 2024-11-22T15:22:52,576 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/7ce6abde3d0941f0ae2990d916e72f6f is 50, key is test_row_0/C:col10/1732288971648/Put/seqid=0 2024-11-22T15:22:52,638 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:52,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742043_1219 (size=12151) 2024-11-22T15:22:52,640 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-22T15:22:52,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:52,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. as already flushing 2024-11-22T15:22:52,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:52,641 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:52,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:52,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:52,666 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:52,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289032663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:52,672 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:52,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289032665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:52,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-22T15:22:52,794 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:52,794 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-22T15:22:52,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:52,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. as already flushing 2024-11-22T15:22:52,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:52,795 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:22:52,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:52,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:52,946 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:52,947 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-22T15:22:52,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:52,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. as already flushing 2024-11-22T15:22:52,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:52,947 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:52,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:52,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:52,992 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-22T15:22:53,040 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/7ce6abde3d0941f0ae2990d916e72f6f 2024-11-22T15:22:53,050 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/3c643a7fb711440ca19dd9d5fd93ad19 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/3c643a7fb711440ca19dd9d5fd93ad19 2024-11-22T15:22:53,062 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/3c643a7fb711440ca19dd9d5fd93ad19, entries=250, sequenceid=211, filesize=47.3 K 2024-11-22T15:22:53,063 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/49c606cf47844c98b58d3db2ee0969ae as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/49c606cf47844c98b58d3db2ee0969ae 2024-11-22T15:22:53,069 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/49c606cf47844c98b58d3db2ee0969ae, entries=150, sequenceid=211, filesize=11.9 K 2024-11-22T15:22:53,071 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/7ce6abde3d0941f0ae2990d916e72f6f as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/7ce6abde3d0941f0ae2990d916e72f6f 2024-11-22T15:22:53,077 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/7ce6abde3d0941f0ae2990d916e72f6f, entries=150, sequenceid=211, filesize=11.9 K 2024-11-22T15:22:53,078 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for c30eef12e99d24ea8c4e5ace242daf20 in 1105ms, sequenceid=211, compaction requested=true 2024-11-22T15:22:53,078 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c30eef12e99d24ea8c4e5ace242daf20: 2024-11-22T15:22:53,078 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c30eef12e99d24ea8c4e5ace242daf20:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:22:53,078 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 
0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:53,078 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:53,078 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c30eef12e99d24ea8c4e5ace242daf20:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:22:53,078 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:53,078 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:53,079 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c30eef12e99d24ea8c4e5ace242daf20:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:22:53,079 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:22:53,081 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110975 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:53,081 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): c30eef12e99d24ea8c4e5ace242daf20/A is initiating minor compaction (all files) 2024-11-22T15:22:53,081 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c30eef12e99d24ea8c4e5ace242daf20/A in TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:53,081 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/65bb600c18554f95a697b34f2179e93e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/dfca9b4c51724b9593d798a270c89ca3, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/3c643a7fb711440ca19dd9d5fd93ad19] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp, totalSize=108.4 K 2024-11-22T15:22:53,081 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:53,081 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 
files: [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/65bb600c18554f95a697b34f2179e93e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/dfca9b4c51724b9593d798a270c89ca3, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/3c643a7fb711440ca19dd9d5fd93ad19] 2024-11-22T15:22:53,082 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:53,082 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 65bb600c18554f95a697b34f2179e93e, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732288968262 2024-11-22T15:22:53,082 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): c30eef12e99d24ea8c4e5ace242daf20/B is initiating minor compaction (all files) 2024-11-22T15:22:53,082 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c30eef12e99d24ea8c4e5ace242daf20/B in TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:53,082 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/6308d16ce77b4244a728032a5a2cf352, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/473b6c69adf24b1993b3e44b1f6a040c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/49c606cf47844c98b58d3db2ee0969ae] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp, totalSize=36.0 K 2024-11-22T15:22:53,082 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting dfca9b4c51724b9593d798a270c89ca3, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1732288969480 2024-11-22T15:22:53,083 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 6308d16ce77b4244a728032a5a2cf352, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732288968262 2024-11-22T15:22:53,083 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3c643a7fb711440ca19dd9d5fd93ad19, keycount=250, bloomtype=ROW, size=47.3 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732288971647 2024-11-22T15:22:53,083 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 473b6c69adf24b1993b3e44b1f6a040c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1732288969480 2024-11-22T15:22:53,084 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 
49c606cf47844c98b58d3db2ee0969ae, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732288971648 2024-11-22T15:22:53,095 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=c30eef12e99d24ea8c4e5ace242daf20] 2024-11-22T15:22:53,100 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:53,100 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c30eef12e99d24ea8c4e5ace242daf20#B#compaction#189 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:53,101 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/634f8093cbed40ecbe2c41ed0de15a24 is 50, key is test_row_0/B:col10/1732288971648/Put/seqid=0 2024-11-22T15:22:53,102 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-22T15:22:53,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 
2024-11-22T15:22:53,102 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2837): Flushing c30eef12e99d24ea8c4e5ace242daf20 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-22T15:22:53,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=A 2024-11-22T15:22:53,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:53,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=B 2024-11-22T15:22:53,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:53,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=C 2024-11-22T15:22:53,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:53,106 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241122223b7f28055d48a49426663dfb206060_c30eef12e99d24ea8c4e5ace242daf20 store=[table=TestAcidGuarantees family=A region=c30eef12e99d24ea8c4e5ace242daf20] 2024-11-22T15:22:53,108 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241122223b7f28055d48a49426663dfb206060_c30eef12e99d24ea8c4e5ace242daf20, store=[table=TestAcidGuarantees family=A region=c30eef12e99d24ea8c4e5ace242daf20] 2024-11-22T15:22:53,108 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122223b7f28055d48a49426663dfb206060_c30eef12e99d24ea8c4e5ace242daf20 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=c30eef12e99d24ea8c4e5ace242daf20] 2024-11-22T15:22:53,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411227ff879c85f644c9288d652f8ea38418d_c30eef12e99d24ea8c4e5ace242daf20 is 50, key is test_row_0/A:col10/1732288972036/Put/seqid=0 2024-11-22T15:22:53,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742045_1221 (size=4469) 2024-11-22T15:22:53,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:53,171 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing 
TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. as already flushing 2024-11-22T15:22:53,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742046_1222 (size=12304) 2024-11-22T15:22:53,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:53,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742044_1220 (size=12629) 2024-11-22T15:22:53,178 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411227ff879c85f644c9288d652f8ea38418d_c30eef12e99d24ea8c4e5ace242daf20 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411227ff879c85f644c9288d652f8ea38418d_c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:53,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/3c3f0bfa72f64f9eac6e83ebd6273b4d, store: [table=TestAcidGuarantees family=A region=c30eef12e99d24ea8c4e5ace242daf20] 2024-11-22T15:22:53,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/3c3f0bfa72f64f9eac6e83ebd6273b4d is 175, key is test_row_0/A:col10/1732288972036/Put/seqid=0 2024-11-22T15:22:53,186 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/634f8093cbed40ecbe2c41ed0de15a24 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/634f8093cbed40ecbe2c41ed0de15a24 2024-11-22T15:22:53,198 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c30eef12e99d24ea8c4e5ace242daf20/B of c30eef12e99d24ea8c4e5ace242daf20 into 634f8093cbed40ecbe2c41ed0de15a24(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:22:53,198 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c30eef12e99d24ea8c4e5ace242daf20: 2024-11-22T15:22:53,198 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20., storeName=c30eef12e99d24ea8c4e5ace242daf20/B, priority=13, startTime=1732288973078; duration=0sec 2024-11-22T15:22:53,198 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:22:53,198 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c30eef12e99d24ea8c4e5ace242daf20:B 2024-11-22T15:22:53,199 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:53,201 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:53,201 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): c30eef12e99d24ea8c4e5ace242daf20/C is initiating minor compaction (all files) 2024-11-22T15:22:53,201 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c30eef12e99d24ea8c4e5ace242daf20/C in TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:53,201 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/99405dcbd41a4d0da33d483fe8df1fe7, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/a65ec6b917d44c5da5d3f58d72710b7b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/7ce6abde3d0941f0ae2990d916e72f6f] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp, totalSize=36.0 K 2024-11-22T15:22:53,202 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 99405dcbd41a4d0da33d483fe8df1fe7, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732288968262 2024-11-22T15:22:53,202 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting a65ec6b917d44c5da5d3f58d72710b7b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1732288969480 2024-11-22T15:22:53,203 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 7ce6abde3d0941f0ae2990d916e72f6f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732288971648 2024-11-22T15:22:53,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 
is added to blk_1073742047_1223 (size=31105) 2024-11-22T15:22:53,222 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c30eef12e99d24ea8c4e5ace242daf20#C#compaction#191 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:53,225 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/12afbf0b2d284a54b924d50d50d9b828 is 50, key is test_row_0/C:col10/1732288971648/Put/seqid=0 2024-11-22T15:22:53,228 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:53,228 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:53,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289033224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:53,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289033224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:53,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-22T15:22:53,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742048_1224 (size=12629) 2024-11-22T15:22:53,332 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:53,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289033330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:53,333 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:53,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289033330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:53,534 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:53,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289033534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:53,535 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:53,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289033534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:53,568 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c30eef12e99d24ea8c4e5ace242daf20#A#compaction#188 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:53,568 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/a041bb2dfab44c809685ad3ea54bc96b is 175, key is test_row_0/A:col10/1732288971648/Put/seqid=0 2024-11-22T15:22:53,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742049_1225 (size=31583) 2024-11-22T15:22:53,592 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/a041bb2dfab44c809685ad3ea54bc96b as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/a041bb2dfab44c809685ad3ea54bc96b 2024-11-22T15:22:53,610 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=233, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/3c3f0bfa72f64f9eac6e83ebd6273b4d 2024-11-22T15:22:53,635 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c30eef12e99d24ea8c4e5ace242daf20/A of c30eef12e99d24ea8c4e5ace242daf20 into a041bb2dfab44c809685ad3ea54bc96b(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:22:53,635 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c30eef12e99d24ea8c4e5ace242daf20: 2024-11-22T15:22:53,635 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20., storeName=c30eef12e99d24ea8c4e5ace242daf20/A, priority=13, startTime=1732288973078; duration=0sec 2024-11-22T15:22:53,635 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:53,635 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c30eef12e99d24ea8c4e5ace242daf20:A 2024-11-22T15:22:53,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/c6a1b74e079248238149a0ce291b5bdb is 50, key is test_row_0/B:col10/1732288972036/Put/seqid=0 2024-11-22T15:22:53,678 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:53,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58702 deadline: 1732289033677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:53,679 DEBUG [Thread-779 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4182 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20., hostname=77927f992d0b,36033,1732288915809, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T15:22:53,689 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/12afbf0b2d284a54b924d50d50d9b828 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/12afbf0b2d284a54b924d50d50d9b828 2024-11-22T15:22:53,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742050_1226 (size=12151) 2024-11-22T15:22:53,690 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/c6a1b74e079248238149a0ce291b5bdb 2024-11-22T15:22:53,700 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c30eef12e99d24ea8c4e5ace242daf20/C of c30eef12e99d24ea8c4e5ace242daf20 into 12afbf0b2d284a54b924d50d50d9b828(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:22:53,700 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c30eef12e99d24ea8c4e5ace242daf20: 2024-11-22T15:22:53,700 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20., storeName=c30eef12e99d24ea8c4e5ace242daf20/C, priority=13, startTime=1732288973079; duration=0sec 2024-11-22T15:22:53,700 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:53,701 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c30eef12e99d24ea8c4e5ace242daf20:C 2024-11-22T15:22:53,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/ced24df96f754b2cb9798cf1436d73ae is 50, key is test_row_0/C:col10/1732288972036/Put/seqid=0 2024-11-22T15:22:53,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742051_1227 (size=12151) 2024-11-22T15:22:53,837 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:53,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289033836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:53,837 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:53,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289033836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:54,134 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/ced24df96f754b2cb9798cf1436d73ae 2024-11-22T15:22:54,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/3c3f0bfa72f64f9eac6e83ebd6273b4d as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/3c3f0bfa72f64f9eac6e83ebd6273b4d 2024-11-22T15:22:54,149 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/3c3f0bfa72f64f9eac6e83ebd6273b4d, entries=150, sequenceid=233, filesize=30.4 K 2024-11-22T15:22:54,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/c6a1b74e079248238149a0ce291b5bdb as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/c6a1b74e079248238149a0ce291b5bdb 2024-11-22T15:22:54,161 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/c6a1b74e079248238149a0ce291b5bdb, entries=150, sequenceid=233, filesize=11.9 K 2024-11-22T15:22:54,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/ced24df96f754b2cb9798cf1436d73ae as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/ced24df96f754b2cb9798cf1436d73ae 2024-11-22T15:22:54,169 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/ced24df96f754b2cb9798cf1436d73ae, entries=150, sequenceid=233, filesize=11.9 K 2024-11-22T15:22:54,170 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for c30eef12e99d24ea8c4e5ace242daf20 in 1068ms, sequenceid=233, compaction requested=false 2024-11-22T15:22:54,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2538): Flush status journal for c30eef12e99d24ea8c4e5ace242daf20: 2024-11-22T15:22:54,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:54,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=56 2024-11-22T15:22:54,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=56 2024-11-22T15:22:54,175 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55 2024-11-22T15:22:54,175 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9980 sec 2024-11-22T15:22:54,177 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees in 2.0050 sec 2024-11-22T15:22:54,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-22T15:22:54,279 INFO [Thread-789 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 55 completed 2024-11-22T15:22:54,281 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:22:54,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees 2024-11-22T15:22:54,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-22T15:22:54,283 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:22:54,284 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:22:54,284 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:22:54,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:54,340 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c30eef12e99d24ea8c4e5ace242daf20 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-22T15:22:54,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=A 2024-11-22T15:22:54,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:54,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=B 2024-11-22T15:22:54,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:54,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=C 2024-11-22T15:22:54,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:54,365 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122bb8ec3c9752145c98fc173e5d4d52e7d_c30eef12e99d24ea8c4e5ace242daf20 is 50, key is test_row_0/A:col10/1732288974339/Put/seqid=0 2024-11-22T15:22:54,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742052_1228 (size=12304) 2024-11-22T15:22:54,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-22T15:22:54,385 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:54,385 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:54,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289034379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:54,386 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:54,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289034382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:54,391 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122bb8ec3c9752145c98fc173e5d4d52e7d_c30eef12e99d24ea8c4e5ace242daf20 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122bb8ec3c9752145c98fc173e5d4d52e7d_c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:54,392 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/5abda39d82f040478ede1b63ded2c199, store: [table=TestAcidGuarantees family=A region=c30eef12e99d24ea8c4e5ace242daf20] 2024-11-22T15:22:54,393 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/5abda39d82f040478ede1b63ded2c199 is 175, key is test_row_0/A:col10/1732288974339/Put/seqid=0 2024-11-22T15:22:54,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742053_1229 (size=31105) 2024-11-22T15:22:54,419 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=252, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/5abda39d82f040478ede1b63ded2c199 2024-11-22T15:22:54,433 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/180094bfb8134298a428b94fd489adda is 50, key is test_row_0/B:col10/1732288974339/Put/seqid=0 2024-11-22T15:22:54,435 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:54,436 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 
2024-11-22T15:22:54,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:54,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. as already flushing 2024-11-22T15:22:54,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:54,436 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:54,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:22:54,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:54,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742054_1230 (size=12151) 2024-11-22T15:22:54,485 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/180094bfb8134298a428b94fd489adda 2024-11-22T15:22:54,490 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:54,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289034487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:54,490 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:54,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289034489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:54,495 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/d2beacd5edad40b3b634b6f3c45fc914 is 50, key is test_row_0/C:col10/1732288974339/Put/seqid=0 2024-11-22T15:22:54,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742055_1231 (size=12151) 2024-11-22T15:22:54,539 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/d2beacd5edad40b3b634b6f3c45fc914 2024-11-22T15:22:54,555 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/5abda39d82f040478ede1b63ded2c199 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/5abda39d82f040478ede1b63ded2c199 2024-11-22T15:22:54,562 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/5abda39d82f040478ede1b63ded2c199, entries=150, sequenceid=252, filesize=30.4 K 2024-11-22T15:22:54,563 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/180094bfb8134298a428b94fd489adda as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/180094bfb8134298a428b94fd489adda 2024-11-22T15:22:54,571 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/180094bfb8134298a428b94fd489adda, entries=150, sequenceid=252, filesize=11.9 K 2024-11-22T15:22:54,572 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/d2beacd5edad40b3b634b6f3c45fc914 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/d2beacd5edad40b3b634b6f3c45fc914 2024-11-22T15:22:54,578 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/d2beacd5edad40b3b634b6f3c45fc914, entries=150, sequenceid=252, filesize=11.9 K 2024-11-22T15:22:54,578 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for c30eef12e99d24ea8c4e5ace242daf20 in 238ms, sequenceid=252, compaction requested=true 2024-11-22T15:22:54,579 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c30eef12e99d24ea8c4e5ace242daf20: 2024-11-22T15:22:54,579 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:54,579 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c30eef12e99d24ea8c4e5ace242daf20:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:22:54,579 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:54,580 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93793 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:54,580 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): c30eef12e99d24ea8c4e5ace242daf20/A is initiating minor compaction (all files) 2024-11-22T15:22:54,580 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c30eef12e99d24ea8c4e5ace242daf20/A in TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 
2024-11-22T15:22:54,580 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/a041bb2dfab44c809685ad3ea54bc96b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/3c3f0bfa72f64f9eac6e83ebd6273b4d, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/5abda39d82f040478ede1b63ded2c199] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp, totalSize=91.6 K 2024-11-22T15:22:54,580 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:54,580 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. files: [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/a041bb2dfab44c809685ad3ea54bc96b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/3c3f0bfa72f64f9eac6e83ebd6273b4d, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/5abda39d82f040478ede1b63ded2c199] 2024-11-22T15:22:54,581 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting a041bb2dfab44c809685ad3ea54bc96b, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732288971648 2024-11-22T15:22:54,582 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3c3f0bfa72f64f9eac6e83ebd6273b4d, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1732288972036 2024-11-22T15:22:54,582 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5abda39d82f040478ede1b63ded2c199, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732288973196 2024-11-22T15:22:54,582 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:54,584 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:54,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-22T15:22:54,584 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] 
regionserver.HStore(1540): c30eef12e99d24ea8c4e5ace242daf20/B is initiating minor compaction (all files) 2024-11-22T15:22:54,584 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c30eef12e99d24ea8c4e5ace242daf20/B in TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:54,584 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/634f8093cbed40ecbe2c41ed0de15a24, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/c6a1b74e079248238149a0ce291b5bdb, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/180094bfb8134298a428b94fd489adda] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp, totalSize=36.1 K 2024-11-22T15:22:54,585 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 634f8093cbed40ecbe2c41ed0de15a24, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732288971648 2024-11-22T15:22:54,586 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting c6a1b74e079248238149a0ce291b5bdb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1732288972036 2024-11-22T15:22:54,587 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 180094bfb8134298a428b94fd489adda, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732288973196 2024-11-22T15:22:54,589 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:54,590 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-22T15:22:54,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 
2024-11-22T15:22:54,590 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2837): Flushing c30eef12e99d24ea8c4e5ace242daf20 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-22T15:22:54,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=A 2024-11-22T15:22:54,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:54,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=B 2024-11-22T15:22:54,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:54,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=C 2024-11-22T15:22:54,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:54,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c30eef12e99d24ea8c4e5ace242daf20:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:22:54,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:54,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c30eef12e99d24ea8c4e5ace242daf20:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:22:54,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:22:54,610 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=c30eef12e99d24ea8c4e5ace242daf20] 2024-11-22T15:22:54,625 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c30eef12e99d24ea8c4e5ace242daf20#B#compaction#198 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:54,625 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/baed282e94cb4642889737ebd0e23cb0 is 50, key is test_row_0/B:col10/1732288974339/Put/seqid=0 2024-11-22T15:22:54,627 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411225190c404cb7448e8bc8eb1694f38417b_c30eef12e99d24ea8c4e5ace242daf20 store=[table=TestAcidGuarantees family=A region=c30eef12e99d24ea8c4e5ace242daf20] 2024-11-22T15:22:54,629 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411225190c404cb7448e8bc8eb1694f38417b_c30eef12e99d24ea8c4e5ace242daf20, store=[table=TestAcidGuarantees family=A region=c30eef12e99d24ea8c4e5ace242daf20] 2024-11-22T15:22:54,629 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411225190c404cb7448e8bc8eb1694f38417b_c30eef12e99d24ea8c4e5ace242daf20 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=c30eef12e99d24ea8c4e5ace242daf20] 2024-11-22T15:22:54,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411222ec9260a2c6c40928ef6df5cf389b9de_c30eef12e99d24ea8c4e5ace242daf20 is 50, key is test_row_0/A:col10/1732288974381/Put/seqid=0 2024-11-22T15:22:54,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742058_1234 (size=4469) 2024-11-22T15:22:54,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742057_1233 (size=12454) 2024-11-22T15:22:54,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742056_1232 (size=12731) 2024-11-22T15:22:54,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:54,696 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 
as already flushing 2024-11-22T15:22:54,699 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/baed282e94cb4642889737ebd0e23cb0 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/baed282e94cb4642889737ebd0e23cb0 2024-11-22T15:22:54,727 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c30eef12e99d24ea8c4e5ace242daf20/B of c30eef12e99d24ea8c4e5ace242daf20 into baed282e94cb4642889737ebd0e23cb0(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:22:54,727 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c30eef12e99d24ea8c4e5ace242daf20: 2024-11-22T15:22:54,727 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20., storeName=c30eef12e99d24ea8c4e5ace242daf20/B, priority=13, startTime=1732288974580; duration=0sec 2024-11-22T15:22:54,727 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:22:54,727 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c30eef12e99d24ea8c4e5ace242daf20:B 2024-11-22T15:22:54,728 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:54,732 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:54,732 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): c30eef12e99d24ea8c4e5ace242daf20/C is initiating minor compaction (all files) 2024-11-22T15:22:54,732 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c30eef12e99d24ea8c4e5ace242daf20/C in TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 
2024-11-22T15:22:54,732 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/12afbf0b2d284a54b924d50d50d9b828, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/ced24df96f754b2cb9798cf1436d73ae, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/d2beacd5edad40b3b634b6f3c45fc914] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp, totalSize=36.1 K 2024-11-22T15:22:54,732 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 12afbf0b2d284a54b924d50d50d9b828, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732288971648 2024-11-22T15:22:54,733 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting ced24df96f754b2cb9798cf1436d73ae, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1732288972036 2024-11-22T15:22:54,733 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting d2beacd5edad40b3b634b6f3c45fc914, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732288973196 2024-11-22T15:22:54,745 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c30eef12e99d24ea8c4e5ace242daf20#C#compaction#200 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:54,745 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/d3daf610ec1c48c2b25b91a3c5b29955 is 50, key is test_row_0/C:col10/1732288974339/Put/seqid=0 2024-11-22T15:22:54,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742059_1235 (size=12731) 2024-11-22T15:22:54,771 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:54,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289034764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:54,772 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/d3daf610ec1c48c2b25b91a3c5b29955 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/d3daf610ec1c48c2b25b91a3c5b29955 2024-11-22T15:22:54,774 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:54,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289034771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:54,788 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c30eef12e99d24ea8c4e5ace242daf20/C of c30eef12e99d24ea8c4e5ace242daf20 into d3daf610ec1c48c2b25b91a3c5b29955(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:22:54,788 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c30eef12e99d24ea8c4e5ace242daf20: 2024-11-22T15:22:54,788 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20., storeName=c30eef12e99d24ea8c4e5ace242daf20/C, priority=13, startTime=1732288974598; duration=0sec 2024-11-22T15:22:54,788 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:54,788 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c30eef12e99d24ea8c4e5ace242daf20:C 2024-11-22T15:22:54,876 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:54,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289034873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:54,877 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:54,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289034876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:54,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-22T15:22:55,064 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c30eef12e99d24ea8c4e5ace242daf20#A#compaction#197 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:55,065 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/7e3c2f7c690a4566aec9a62c36f3163f is 175, key is test_row_0/A:col10/1732288974339/Put/seqid=0 2024-11-22T15:22:55,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:55,074 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411222ec9260a2c6c40928ef6df5cf389b9de_c30eef12e99d24ea8c4e5ace242daf20 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411222ec9260a2c6c40928ef6df5cf389b9de_c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:55,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/0f0a46aa6a5e49ada6540515ec20511e, store: [table=TestAcidGuarantees family=A region=c30eef12e99d24ea8c4e5ace242daf20] 2024-11-22T15:22:55,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/0f0a46aa6a5e49ada6540515ec20511e is 175, key is test_row_0/A:col10/1732288974381/Put/seqid=0 2024-11-22T15:22:55,081 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:55,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289035077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:55,081 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:55,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289035079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:55,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742060_1236 (size=31685) 2024-11-22T15:22:55,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742061_1237 (size=31255) 2024-11-22T15:22:55,099 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=272, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/0f0a46aa6a5e49ada6540515ec20511e 2024-11-22T15:22:55,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/2504e5828f1240079dce8f413878618b is 50, key is test_row_0/B:col10/1732288974381/Put/seqid=0 2024-11-22T15:22:55,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742062_1238 (size=12301) 2024-11-22T15:22:55,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-22T15:22:55,387 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:55,387 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:55,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289035385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:55,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289035384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:55,500 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/7e3c2f7c690a4566aec9a62c36f3163f as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/7e3c2f7c690a4566aec9a62c36f3163f 2024-11-22T15:22:55,510 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c30eef12e99d24ea8c4e5ace242daf20/A of 
c30eef12e99d24ea8c4e5ace242daf20 into 7e3c2f7c690a4566aec9a62c36f3163f(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:22:55,510 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c30eef12e99d24ea8c4e5ace242daf20: 2024-11-22T15:22:55,510 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20., storeName=c30eef12e99d24ea8c4e5ace242daf20/A, priority=13, startTime=1732288974579; duration=0sec 2024-11-22T15:22:55,510 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:55,510 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c30eef12e99d24ea8c4e5ace242daf20:A 2024-11-22T15:22:55,566 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=272 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/2504e5828f1240079dce8f413878618b 2024-11-22T15:22:55,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/c128b190f15444a2beae969b73e10a23 is 50, key is test_row_0/C:col10/1732288974381/Put/seqid=0 2024-11-22T15:22:55,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742063_1239 (size=12301) 2024-11-22T15:22:55,614 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=272 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/c128b190f15444a2beae969b73e10a23 2024-11-22T15:22:55,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/0f0a46aa6a5e49ada6540515ec20511e as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/0f0a46aa6a5e49ada6540515ec20511e 2024-11-22T15:22:55,633 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/0f0a46aa6a5e49ada6540515ec20511e, entries=150, sequenceid=272, filesize=30.5 K 2024-11-22T15:22:55,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 
{event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/2504e5828f1240079dce8f413878618b as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/2504e5828f1240079dce8f413878618b 2024-11-22T15:22:55,641 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/2504e5828f1240079dce8f413878618b, entries=150, sequenceid=272, filesize=12.0 K 2024-11-22T15:22:55,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/c128b190f15444a2beae969b73e10a23 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/c128b190f15444a2beae969b73e10a23 2024-11-22T15:22:55,648 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/c128b190f15444a2beae969b73e10a23, entries=150, sequenceid=272, filesize=12.0 K 2024-11-22T15:22:55,649 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for c30eef12e99d24ea8c4e5ace242daf20 in 1059ms, sequenceid=272, compaction requested=false 2024-11-22T15:22:55,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2538): Flush status journal for c30eef12e99d24ea8c4e5ace242daf20: 2024-11-22T15:22:55,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 
2024-11-22T15:22:55,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=58 2024-11-22T15:22:55,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=58 2024-11-22T15:22:55,654 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57 2024-11-22T15:22:55,654 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3670 sec 2024-11-22T15:22:55,656 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees in 1.3730 sec 2024-11-22T15:22:55,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:55,895 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c30eef12e99d24ea8c4e5ace242daf20 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-22T15:22:55,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=A 2024-11-22T15:22:55,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:55,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=B 2024-11-22T15:22:55,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:55,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=C 2024-11-22T15:22:55,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:55,954 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122bad5a3ad7ef743d39509f5363eaee9f2_c30eef12e99d24ea8c4e5ace242daf20 is 50, key is test_row_0/A:col10/1732288974758/Put/seqid=0 2024-11-22T15:22:55,983 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:55,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289035976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:55,987 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:55,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289035982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:55,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742064_1240 (size=12454) 2024-11-22T15:22:55,992 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:55,997 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122bad5a3ad7ef743d39509f5363eaee9f2_c30eef12e99d24ea8c4e5ace242daf20 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122bad5a3ad7ef743d39509f5363eaee9f2_c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:55,998 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/ab6520bc9706402ab26787e28015f652, store: [table=TestAcidGuarantees family=A region=c30eef12e99d24ea8c4e5ace242daf20] 2024-11-22T15:22:55,999 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/ab6520bc9706402ab26787e28015f652 is 175, key is test_row_0/A:col10/1732288974758/Put/seqid=0 2024-11-22T15:22:56,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742065_1241 (size=31255) 2024-11-22T15:22:56,088 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:56,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289036085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:56,089 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:56,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289036088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:56,292 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:56,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289036290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:56,292 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:56,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289036291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:56,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-22T15:22:56,387 INFO [Thread-789 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 57 completed 2024-11-22T15:22:56,388 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:22:56,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees 2024-11-22T15:22:56,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-22T15:22:56,390 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:22:56,391 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:22:56,391 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:22:56,437 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=293, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/ab6520bc9706402ab26787e28015f652 2024-11-22T15:22:56,447 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/46f2e6ceabcb48759432fa4009ba3e8e is 50, key is test_row_0/B:col10/1732288974758/Put/seqid=0 2024-11-22T15:22:56,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742066_1242 (size=12301) 
2024-11-22T15:22:56,485 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/46f2e6ceabcb48759432fa4009ba3e8e 2024-11-22T15:22:56,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-22T15:22:56,512 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/449425cd64894c79a932a8acb57ea797 is 50, key is test_row_0/C:col10/1732288974758/Put/seqid=0 2024-11-22T15:22:56,542 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:56,543 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-22T15:22:56,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:56,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. as already flushing 2024-11-22T15:22:56,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:56,543 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:22:56,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:56,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:56,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742067_1243 (size=12301) 2024-11-22T15:22:56,596 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:56,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289036593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:56,596 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:56,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289036594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:56,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-22T15:22:56,696 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:56,697 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-22T15:22:56,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:56,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. as already flushing 2024-11-22T15:22:56,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:56,697 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:22:56,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:56,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:56,848 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:56,848 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-22T15:22:56,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:56,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. as already flushing 2024-11-22T15:22:56,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:56,849 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:56,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:22:56,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:22:56,946 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/449425cd64894c79a932a8acb57ea797 2024-11-22T15:22:56,951 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/ab6520bc9706402ab26787e28015f652 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/ab6520bc9706402ab26787e28015f652 2024-11-22T15:22:56,955 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/ab6520bc9706402ab26787e28015f652, entries=150, sequenceid=293, filesize=30.5 K 2024-11-22T15:22:56,957 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/46f2e6ceabcb48759432fa4009ba3e8e as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/46f2e6ceabcb48759432fa4009ba3e8e 2024-11-22T15:22:56,962 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/46f2e6ceabcb48759432fa4009ba3e8e, entries=150, sequenceid=293, filesize=12.0 K 2024-11-22T15:22:56,963 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/449425cd64894c79a932a8acb57ea797 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/449425cd64894c79a932a8acb57ea797 2024-11-22T15:22:56,968 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/449425cd64894c79a932a8acb57ea797, entries=150, sequenceid=293, filesize=12.0 K 2024-11-22T15:22:56,969 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for c30eef12e99d24ea8c4e5ace242daf20 in 1074ms, sequenceid=293, compaction requested=true 2024-11-22T15:22:56,969 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c30eef12e99d24ea8c4e5ace242daf20: 2024-11-22T15:22:56,970 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c30eef12e99d24ea8c4e5ace242daf20:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:22:56,970 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:56,970 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:56,970 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c30eef12e99d24ea8c4e5ace242daf20:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:22:56,970 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:56,970 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:56,970 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c30eef12e99d24ea8c4e5ace242daf20:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:22:56,970 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:22:56,971 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:56,971 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94195 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:56,971 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): c30eef12e99d24ea8c4e5ace242daf20/A is initiating minor compaction (all files) 2024-11-22T15:22:56,971 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): c30eef12e99d24ea8c4e5ace242daf20/B is initiating minor compaction (all files) 2024-11-22T15:22:56,971 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c30eef12e99d24ea8c4e5ace242daf20/A in TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:56,971 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c30eef12e99d24ea8c4e5ace242daf20/B in TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 
2024-11-22T15:22:56,971 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/baed282e94cb4642889737ebd0e23cb0, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/2504e5828f1240079dce8f413878618b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/46f2e6ceabcb48759432fa4009ba3e8e] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp, totalSize=36.5 K 2024-11-22T15:22:56,971 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/7e3c2f7c690a4566aec9a62c36f3163f, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/0f0a46aa6a5e49ada6540515ec20511e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/ab6520bc9706402ab26787e28015f652] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp, totalSize=92.0 K 2024-11-22T15:22:56,971 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:56,971 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 
files: [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/7e3c2f7c690a4566aec9a62c36f3163f, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/0f0a46aa6a5e49ada6540515ec20511e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/ab6520bc9706402ab26787e28015f652] 2024-11-22T15:22:56,972 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7e3c2f7c690a4566aec9a62c36f3163f, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732288973196 2024-11-22T15:22:56,972 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting baed282e94cb4642889737ebd0e23cb0, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732288973196 2024-11-22T15:22:56,972 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0f0a46aa6a5e49ada6540515ec20511e, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1732288974377 2024-11-22T15:22:56,973 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 2504e5828f1240079dce8f413878618b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1732288974377 2024-11-22T15:22:56,973 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting ab6520bc9706402ab26787e28015f652, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732288974734 2024-11-22T15:22:56,973 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 46f2e6ceabcb48759432fa4009ba3e8e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732288974734 2024-11-22T15:22:56,991 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c30eef12e99d24ea8c4e5ace242daf20#B#compaction#206 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:56,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-22T15:22:56,992 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/ac62b0a88c57420e94b79edd8b991463 is 50, key is test_row_0/B:col10/1732288974758/Put/seqid=0 2024-11-22T15:22:57,001 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:57,002 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-22T15:22:57,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:57,002 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2837): Flushing c30eef12e99d24ea8c4e5ace242daf20 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-22T15:22:57,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=A 2024-11-22T15:22:57,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:57,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=B 2024-11-22T15:22:57,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:57,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=C 2024-11-22T15:22:57,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:57,007 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=c30eef12e99d24ea8c4e5ace242daf20] 2024-11-22T15:22:57,036 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411229caac316e3f1421299985be804ff0a37_c30eef12e99d24ea8c4e5ace242daf20 store=[table=TestAcidGuarantees family=A region=c30eef12e99d24ea8c4e5ace242daf20] 2024-11-22T15:22:57,038 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 
major=true file=d41d8cd98f00b204e9800998ecf8427e202411229caac316e3f1421299985be804ff0a37_c30eef12e99d24ea8c4e5ace242daf20, store=[table=TestAcidGuarantees family=A region=c30eef12e99d24ea8c4e5ace242daf20] 2024-11-22T15:22:57,038 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411229caac316e3f1421299985be804ff0a37_c30eef12e99d24ea8c4e5ace242daf20 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=c30eef12e99d24ea8c4e5ace242daf20] 2024-11-22T15:22:57,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742068_1244 (size=12983) 2024-11-22T15:22:57,069 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/ac62b0a88c57420e94b79edd8b991463 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/ac62b0a88c57420e94b79edd8b991463 2024-11-22T15:22:57,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122859c866db31d45ec97ad3297bf947771_c30eef12e99d24ea8c4e5ace242daf20 is 50, key is test_row_0/A:col10/1732288975971/Put/seqid=0 2024-11-22T15:22:57,080 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c30eef12e99d24ea8c4e5ace242daf20/B of c30eef12e99d24ea8c4e5ace242daf20 into ac62b0a88c57420e94b79edd8b991463(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:22:57,080 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c30eef12e99d24ea8c4e5ace242daf20: 2024-11-22T15:22:57,080 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20., storeName=c30eef12e99d24ea8c4e5ace242daf20/B, priority=13, startTime=1732288976970; duration=0sec 2024-11-22T15:22:57,080 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:22:57,080 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c30eef12e99d24ea8c4e5ace242daf20:B 2024-11-22T15:22:57,080 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:57,082 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:57,083 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): c30eef12e99d24ea8c4e5ace242daf20/C is initiating minor compaction (all files) 2024-11-22T15:22:57,083 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c30eef12e99d24ea8c4e5ace242daf20/C in TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:57,083 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/d3daf610ec1c48c2b25b91a3c5b29955, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/c128b190f15444a2beae969b73e10a23, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/449425cd64894c79a932a8acb57ea797] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp, totalSize=36.5 K 2024-11-22T15:22:57,084 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting d3daf610ec1c48c2b25b91a3c5b29955, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732288973196 2024-11-22T15:22:57,084 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting c128b190f15444a2beae969b73e10a23, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1732288974377 2024-11-22T15:22:57,085 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 449425cd64894c79a932a8acb57ea797, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732288974734 2024-11-22T15:22:57,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 
is added to blk_1073742070_1246 (size=12454) 2024-11-22T15:22:57,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:57,105 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122859c866db31d45ec97ad3297bf947771_c30eef12e99d24ea8c4e5ace242daf20 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122859c866db31d45ec97ad3297bf947771_c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:57,106 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. as already flushing 2024-11-22T15:22:57,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:57,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/254c3baf69624b08b539a2f3d23d09c1, store: [table=TestAcidGuarantees family=A region=c30eef12e99d24ea8c4e5ace242daf20] 2024-11-22T15:22:57,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/254c3baf69624b08b539a2f3d23d09c1 is 175, key is test_row_0/A:col10/1732288975971/Put/seqid=0 2024-11-22T15:22:57,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742069_1245 (size=4469) 2024-11-22T15:22:57,113 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c30eef12e99d24ea8c4e5ace242daf20#A#compaction#207 average throughput is 0.23 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:57,113 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/9c7c39bd73734ade9def32ff8bf2066e is 175, key is test_row_0/A:col10/1732288974758/Put/seqid=0 2024-11-22T15:22:57,122 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c30eef12e99d24ea8c4e5ace242daf20#C#compaction#209 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:57,122 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/7ffe380b84214e548e430144a1ed445b is 50, key is test_row_0/C:col10/1732288974758/Put/seqid=0 2024-11-22T15:22:57,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:57,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289037139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:57,148 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:57,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289037144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:57,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742072_1248 (size=31937) 2024-11-22T15:22:57,181 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/9c7c39bd73734ade9def32ff8bf2066e as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/9c7c39bd73734ade9def32ff8bf2066e 2024-11-22T15:22:57,190 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c30eef12e99d24ea8c4e5ace242daf20/A of c30eef12e99d24ea8c4e5ace242daf20 into 9c7c39bd73734ade9def32ff8bf2066e(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:22:57,190 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c30eef12e99d24ea8c4e5ace242daf20: 2024-11-22T15:22:57,190 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20., storeName=c30eef12e99d24ea8c4e5ace242daf20/A, priority=13, startTime=1732288976970; duration=0sec 2024-11-22T15:22:57,190 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:57,190 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c30eef12e99d24ea8c4e5ace242daf20:A 2024-11-22T15:22:57,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742071_1247 (size=31255) 2024-11-22T15:22:57,197 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=311, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/254c3baf69624b08b539a2f3d23d09c1 2024-11-22T15:22:57,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742073_1249 (size=12983) 2024-11-22T15:22:57,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/481c7d0571534f19942fac17a9d0d3c3 is 50, key is test_row_0/B:col10/1732288975971/Put/seqid=0 2024-11-22T15:22:57,211 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/7ffe380b84214e548e430144a1ed445b as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/7ffe380b84214e548e430144a1ed445b 2024-11-22T15:22:57,216 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c30eef12e99d24ea8c4e5ace242daf20/C of c30eef12e99d24ea8c4e5ace242daf20 into 7ffe380b84214e548e430144a1ed445b(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:22:57,217 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c30eef12e99d24ea8c4e5ace242daf20: 2024-11-22T15:22:57,217 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20., storeName=c30eef12e99d24ea8c4e5ace242daf20/C, priority=13, startTime=1732288976970; duration=0sec 2024-11-22T15:22:57,217 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:57,217 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c30eef12e99d24ea8c4e5ace242daf20:C 2024-11-22T15:22:57,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742074_1250 (size=12301) 2024-11-22T15:22:57,246 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:57,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289037245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:57,250 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:57,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289037250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:57,448 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:57,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289037448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:57,455 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:57,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289037453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:57,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-22T15:22:57,630 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/481c7d0571534f19942fac17a9d0d3c3 2024-11-22T15:22:57,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/6564a30fac184bcf8b0ff0b20e8e1829 is 50, key is test_row_0/C:col10/1732288975971/Put/seqid=0 2024-11-22T15:22:57,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742075_1251 (size=12301) 2024-11-22T15:22:57,697 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/6564a30fac184bcf8b0ff0b20e8e1829 2024-11-22T15:22:57,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/254c3baf69624b08b539a2f3d23d09c1 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/254c3baf69624b08b539a2f3d23d09c1 2024-11-22T15:22:57,716 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/254c3baf69624b08b539a2f3d23d09c1, entries=150, sequenceid=311, filesize=30.5 K 2024-11-22T15:22:57,717 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:57,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58702 deadline: 1732289037714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:57,718 DEBUG [Thread-779 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8221 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20., hostname=77927f992d0b,36033,1732288915809, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at 
org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T15:22:57,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/481c7d0571534f19942fac17a9d0d3c3 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/481c7d0571534f19942fac17a9d0d3c3 2024-11-22T15:22:57,729 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/481c7d0571534f19942fac17a9d0d3c3, entries=150, sequenceid=311, filesize=12.0 K 2024-11-22T15:22:57,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/6564a30fac184bcf8b0ff0b20e8e1829 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/6564a30fac184bcf8b0ff0b20e8e1829 2024-11-22T15:22:57,739 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/6564a30fac184bcf8b0ff0b20e8e1829, entries=150, sequenceid=311, filesize=12.0 K 2024-11-22T15:22:57,740 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for c30eef12e99d24ea8c4e5ace242daf20 in 738ms, sequenceid=311, compaction requested=false 2024-11-22T15:22:57,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] 
regionserver.HRegion(2538): Flush status journal for c30eef12e99d24ea8c4e5ace242daf20: 2024-11-22T15:22:57,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:57,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=60 2024-11-22T15:22:57,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=60 2024-11-22T15:22:57,743 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59 2024-11-22T15:22:57,743 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3500 sec 2024-11-22T15:22:57,744 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees in 1.3550 sec 2024-11-22T15:22:57,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:57,752 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c30eef12e99d24ea8c4e5ace242daf20 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-22T15:22:57,753 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=A 2024-11-22T15:22:57,753 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:57,753 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=B 2024-11-22T15:22:57,753 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:57,753 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=C 2024-11-22T15:22:57,753 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:57,778 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122f319f9a9a3bf4083aaa1f16475e0119d_c30eef12e99d24ea8c4e5ace242daf20 is 50, key is test_row_0/A:col10/1732288977751/Put/seqid=0 2024-11-22T15:22:57,791 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:57,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289037789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:57,796 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:57,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289037791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:57,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742076_1252 (size=14994) 2024-11-22T15:22:57,893 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:57,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289037893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:57,901 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:57,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289037897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:58,097 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:58,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289038094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:58,105 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:58,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289038103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:58,213 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:58,218 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122f319f9a9a3bf4083aaa1f16475e0119d_c30eef12e99d24ea8c4e5ace242daf20 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122f319f9a9a3bf4083aaa1f16475e0119d_c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:58,220 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/cd0c2fa4f1b0499ba0651a86b4029af7, store: [table=TestAcidGuarantees family=A region=c30eef12e99d24ea8c4e5ace242daf20] 2024-11-22T15:22:58,221 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/cd0c2fa4f1b0499ba0651a86b4029af7 is 175, key is test_row_0/A:col10/1732288977751/Put/seqid=0 2024-11-22T15:22:58,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742077_1253 (size=39949) 2024-11-22T15:22:58,254 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=333, 
memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/cd0c2fa4f1b0499ba0651a86b4029af7 2024-11-22T15:22:58,282 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/e9443652b6b9499cbef183232357fadc is 50, key is test_row_0/B:col10/1732288977751/Put/seqid=0 2024-11-22T15:22:58,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742078_1254 (size=12301) 2024-11-22T15:22:58,329 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=333 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/e9443652b6b9499cbef183232357fadc 2024-11-22T15:22:58,361 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/29762fee1b3247e695cf9e857bd6b368 is 50, key is test_row_0/C:col10/1732288977751/Put/seqid=0 2024-11-22T15:22:58,404 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:58,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58734 deadline: 1732289038401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:58,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742079_1255 (size=12301) 2024-11-22T15:22:58,409 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=333 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/29762fee1b3247e695cf9e857bd6b368 2024-11-22T15:22:58,415 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:22:58,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58668 deadline: 1732289038409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 2024-11-22T15:22:58,420 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/cd0c2fa4f1b0499ba0651a86b4029af7 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/cd0c2fa4f1b0499ba0651a86b4029af7 2024-11-22T15:22:58,424 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/cd0c2fa4f1b0499ba0651a86b4029af7, entries=200, sequenceid=333, filesize=39.0 K 2024-11-22T15:22:58,427 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/e9443652b6b9499cbef183232357fadc as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/e9443652b6b9499cbef183232357fadc 2024-11-22T15:22:58,433 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/e9443652b6b9499cbef183232357fadc, entries=150, sequenceid=333, filesize=12.0 K 2024-11-22T15:22:58,435 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/29762fee1b3247e695cf9e857bd6b368 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/29762fee1b3247e695cf9e857bd6b368 2024-11-22T15:22:58,441 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/29762fee1b3247e695cf9e857bd6b368, entries=150, sequenceid=333, filesize=12.0 K 2024-11-22T15:22:58,444 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 
KB/96180 for c30eef12e99d24ea8c4e5ace242daf20 in 692ms, sequenceid=333, compaction requested=true 2024-11-22T15:22:58,444 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c30eef12e99d24ea8c4e5ace242daf20: 2024-11-22T15:22:58,444 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:58,444 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c30eef12e99d24ea8c4e5ace242daf20:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:22:58,444 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:58,444 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c30eef12e99d24ea8c4e5ace242daf20:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:22:58,444 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:58,444 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:58,444 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c30eef12e99d24ea8c4e5ace242daf20:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:22:58,444 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:22:58,446 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:58,446 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): c30eef12e99d24ea8c4e5ace242daf20/A is initiating minor compaction (all files) 2024-11-22T15:22:58,446 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:58,446 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c30eef12e99d24ea8c4e5ace242daf20/A in TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:58,446 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): c30eef12e99d24ea8c4e5ace242daf20/B is initiating minor compaction (all files) 2024-11-22T15:22:58,446 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c30eef12e99d24ea8c4e5ace242daf20/B in TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 
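The RegionTooBusyException entries above come from the server rejecting mutations once the region's memstore passes its blocking limit (a test-sized 512.0 K here) while the in-flight flush drains it. Below is a minimal sketch of how a writer might back off on that exception, assuming manual retry handling; the stock HBase client already retries retryable exceptions on its own, the table, family and row names simply reuse the test's values, and the attempt count and sleep values are hypothetical.

// Illustrative sketch only, not part of this test log: backing off when the
// server throws RegionTooBusyException because the memstore is over its limit.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long sleepMs = 100;                         // hypothetical starting backoff
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          break;                                  // write accepted
        } catch (RegionTooBusyException e) {
          // Region is over its memstore limit, as in the log above: wait for the
          // in-flight flush to drain the memstore, then retry with a longer sleep.
          Thread.sleep(sleepMs);
          sleepMs *= 2;
        }
      }
    }
  }
}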
2024-11-22T15:22:58,446 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/9c7c39bd73734ade9def32ff8bf2066e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/254c3baf69624b08b539a2f3d23d09c1, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/cd0c2fa4f1b0499ba0651a86b4029af7] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp, totalSize=100.7 K 2024-11-22T15:22:58,446 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/ac62b0a88c57420e94b79edd8b991463, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/481c7d0571534f19942fac17a9d0d3c3, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/e9443652b6b9499cbef183232357fadc] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp, totalSize=36.7 K 2024-11-22T15:22:58,446 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:22:58,446 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 
files: [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/9c7c39bd73734ade9def32ff8bf2066e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/254c3baf69624b08b539a2f3d23d09c1, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/cd0c2fa4f1b0499ba0651a86b4029af7] 2024-11-22T15:22:58,446 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting ac62b0a88c57420e94b79edd8b991463, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732288974734 2024-11-22T15:22:58,447 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 481c7d0571534f19942fac17a9d0d3c3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1732288975955 2024-11-22T15:22:58,447 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9c7c39bd73734ade9def32ff8bf2066e, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732288974734 2024-11-22T15:22:58,447 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting e9443652b6b9499cbef183232357fadc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1732288977137 2024-11-22T15:22:58,447 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 254c3baf69624b08b539a2f3d23d09c1, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1732288975955 2024-11-22T15:22:58,448 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting cd0c2fa4f1b0499ba0651a86b4029af7, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1732288977136 2024-11-22T15:22:58,465 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c30eef12e99d24ea8c4e5ace242daf20#B#compaction#215 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:58,466 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/c8d5ef5a4b4145acb5cf52479dac29a6 is 50, key is test_row_0/B:col10/1732288977751/Put/seqid=0 2024-11-22T15:22:58,473 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=c30eef12e99d24ea8c4e5ace242daf20] 2024-11-22T15:22:58,482 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411225f7a9beb770b432b9e0521f0cf26da8c_c30eef12e99d24ea8c4e5ace242daf20 store=[table=TestAcidGuarantees family=A region=c30eef12e99d24ea8c4e5ace242daf20] 2024-11-22T15:22:58,484 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411225f7a9beb770b432b9e0521f0cf26da8c_c30eef12e99d24ea8c4e5ace242daf20, store=[table=TestAcidGuarantees family=A region=c30eef12e99d24ea8c4e5ace242daf20] 2024-11-22T15:22:58,484 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411225f7a9beb770b432b9e0521f0cf26da8c_c30eef12e99d24ea8c4e5ace242daf20 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=c30eef12e99d24ea8c4e5ace242daf20] 2024-11-22T15:22:58,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-22T15:22:58,493 INFO [Thread-789 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 59 completed 2024-11-22T15:22:58,495 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:22:58,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees 2024-11-22T15:22:58,497 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:22:58,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-22T15:22:58,498 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:22:58,498 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE; 
org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:22:58,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742080_1256 (size=13085) 2024-11-22T15:22:58,516 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/c8d5ef5a4b4145acb5cf52479dac29a6 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/c8d5ef5a4b4145acb5cf52479dac29a6 2024-11-22T15:22:58,521 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c30eef12e99d24ea8c4e5ace242daf20/B of c30eef12e99d24ea8c4e5ace242daf20 into c8d5ef5a4b4145acb5cf52479dac29a6(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:22:58,521 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c30eef12e99d24ea8c4e5ace242daf20: 2024-11-22T15:22:58,521 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20., storeName=c30eef12e99d24ea8c4e5ace242daf20/B, priority=13, startTime=1732288978444; duration=0sec 2024-11-22T15:22:58,521 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:22:58,522 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c30eef12e99d24ea8c4e5ace242daf20:B 2024-11-22T15:22:58,522 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:22:58,524 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:22:58,524 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): c30eef12e99d24ea8c4e5ace242daf20/C is initiating minor compaction (all files) 2024-11-22T15:22:58,525 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c30eef12e99d24ea8c4e5ace242daf20/C in TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 
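The flush logged above is client-driven: HMaster records "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" and stores FlushTableProcedure pid=61 with a FlushRegionProcedure subprocedure. A minimal sketch of the client side, assuming it goes through the standard Admin API (the HBaseAdmin future in the log suggests as much, but the test code itself is not shown here):

// Minimal sketch, assuming the flush above is driven through the public Admin API.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; the master stores a
      // FlushTableProcedure (like pid=61 above) and fans out per-region flushes.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}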
2024-11-22T15:22:58,525 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/7ffe380b84214e548e430144a1ed445b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/6564a30fac184bcf8b0ff0b20e8e1829, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/29762fee1b3247e695cf9e857bd6b368] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp, totalSize=36.7 K 2024-11-22T15:22:58,525 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 7ffe380b84214e548e430144a1ed445b, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732288974734 2024-11-22T15:22:58,525 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 6564a30fac184bcf8b0ff0b20e8e1829, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1732288975955 2024-11-22T15:22:58,527 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 29762fee1b3247e695cf9e857bd6b368, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1732288977137 2024-11-22T15:22:58,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742081_1257 (size=4469) 2024-11-22T15:22:58,530 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c30eef12e99d24ea8c4e5ace242daf20#A#compaction#216 average throughput is 0.43 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:58,531 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/4194d5db2e004f31a9bc93bc8bffea97 is 175, key is test_row_0/A:col10/1732288977751/Put/seqid=0 2024-11-22T15:22:58,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742082_1258 (size=32039) 2024-11-22T15:22:58,560 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/4194d5db2e004f31a9bc93bc8bffea97 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/4194d5db2e004f31a9bc93bc8bffea97 2024-11-22T15:22:58,572 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c30eef12e99d24ea8c4e5ace242daf20#C#compaction#217 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:22:58,572 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/755c3a8bb1d44bf3a5d58ee000cf8b52 is 50, key is test_row_0/C:col10/1732288977751/Put/seqid=0 2024-11-22T15:22:58,580 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c30eef12e99d24ea8c4e5ace242daf20/A of c30eef12e99d24ea8c4e5ace242daf20 into 4194d5db2e004f31a9bc93bc8bffea97(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:22:58,580 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c30eef12e99d24ea8c4e5ace242daf20: 2024-11-22T15:22:58,580 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20., storeName=c30eef12e99d24ea8c4e5ace242daf20/A, priority=13, startTime=1732288978444; duration=0sec 2024-11-22T15:22:58,580 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:58,580 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c30eef12e99d24ea8c4e5ace242daf20:A 2024-11-22T15:22:58,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-22T15:22:58,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742083_1259 (size=13085) 2024-11-22T15:22:58,650 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:22:58,651 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-22T15:22:58,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 
2024-11-22T15:22:58,651 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2837): Flushing c30eef12e99d24ea8c4e5ace242daf20 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-22T15:22:58,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=A 2024-11-22T15:22:58,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:58,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=B 2024-11-22T15:22:58,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:58,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=C 2024-11-22T15:22:58,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:22:58,656 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/755c3a8bb1d44bf3a5d58ee000cf8b52 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/755c3a8bb1d44bf3a5d58ee000cf8b52 2024-11-22T15:22:58,661 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c30eef12e99d24ea8c4e5ace242daf20/C of c30eef12e99d24ea8c4e5ace242daf20 into 755c3a8bb1d44bf3a5d58ee000cf8b52(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
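The selection messages above report "3 store files, 0 compacting, 3 eligible, 16 blocking" and a 50.00 MB/second throughput cap, which reflect the store-file thresholds in effect for this cluster. A hedged configuration sketch of the usual knobs, assuming the standard property names (they are not printed in this log, and the throughput cap is governed by the compaction throughput controller rather than these keys):

// Hedged illustration of the store-file thresholds the selection messages above
// appear to reflect; the property names are assumed, the values are those
// visible in the log (and are the usual defaults).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);       // at least 3 eligible files per selection
    conf.setInt("hbase.hstore.compaction.max", 10);      // upper bound on files per compaction
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);  // the "16 blocking" in the selection log
    System.out.println("compaction.min = " + conf.get("hbase.hstore.compaction.min"));
  }
}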
2024-11-22T15:22:58,662 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c30eef12e99d24ea8c4e5ace242daf20: 2024-11-22T15:22:58,662 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20., storeName=c30eef12e99d24ea8c4e5ace242daf20/C, priority=13, startTime=1732288978444; duration=0sec 2024-11-22T15:22:58,662 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:22:58,662 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c30eef12e99d24ea8c4e5ace242daf20:C 2024-11-22T15:22:58,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122ccb8186f2d5b45209d97e01152c41a6d_c30eef12e99d24ea8c4e5ace242daf20 is 50, key is test_row_0/A:col10/1732288977779/Put/seqid=0 2024-11-22T15:22:58,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742084_1260 (size=12454) 2024-11-22T15:22:58,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:22:58,720 DEBUG [Thread-794 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3b727d6e to 127.0.0.1:52970 2024-11-22T15:22:58,720 DEBUG [Thread-794 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:22:58,721 DEBUG [Thread-796 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1c7940d9 to 127.0.0.1:52970 2024-11-22T15:22:58,721 DEBUG [Thread-796 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:22:58,723 DEBUG [Thread-790 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2885d2d9 to 127.0.0.1:52970 2024-11-22T15:22:58,723 DEBUG [Thread-790 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:22:58,724 DEBUG [Thread-792 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x22e911df to 127.0.0.1:52970 2024-11-22T15:22:58,724 DEBUG [Thread-792 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:22:58,726 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122ccb8186f2d5b45209d97e01152c41a6d_c30eef12e99d24ea8c4e5ace242daf20 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122ccb8186f2d5b45209d97e01152c41a6d_c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:58,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/d496f6f0990f455d979afae980d6aacd, store: [table=TestAcidGuarantees family=A region=c30eef12e99d24ea8c4e5ace242daf20] 2024-11-22T15:22:58,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/d496f6f0990f455d979afae980d6aacd is 175, key is test_row_0/A:col10/1732288977779/Put/seqid=0 2024-11-22T15:22:58,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742085_1261 (size=31255) 2024-11-22T15:22:58,745 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=352, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/d496f6f0990f455d979afae980d6aacd 2024-11-22T15:22:58,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/9e4dac7b82874cd089286135f5531e36 is 50, key is test_row_0/B:col10/1732288977779/Put/seqid=0 2024-11-22T15:22:58,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742086_1262 (size=12301) 2024-11-22T15:22:58,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-22T15:22:58,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:22:58,909 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 
as already flushing 2024-11-22T15:22:58,909 DEBUG [Thread-785 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0a9306be to 127.0.0.1:52970 2024-11-22T15:22:58,909 DEBUG [Thread-785 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:22:58,921 DEBUG [Thread-781 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4c7d6279 to 127.0.0.1:52970 2024-11-22T15:22:58,921 DEBUG [Thread-781 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:22:59,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-22T15:22:59,173 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=352 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/9e4dac7b82874cd089286135f5531e36 2024-11-22T15:22:59,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/40c9ead1e4e8472ab0f6cbb5322d3141 is 50, key is test_row_0/C:col10/1732288977779/Put/seqid=0 2024-11-22T15:22:59,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742087_1263 (size=12301) 2024-11-22T15:22:59,583 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=352 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/40c9ead1e4e8472ab0f6cbb5322d3141 2024-11-22T15:22:59,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/d496f6f0990f455d979afae980d6aacd as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/d496f6f0990f455d979afae980d6aacd 2024-11-22T15:22:59,593 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/d496f6f0990f455d979afae980d6aacd, entries=150, sequenceid=352, filesize=30.5 K 2024-11-22T15:22:59,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/9e4dac7b82874cd089286135f5531e36 as 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/9e4dac7b82874cd089286135f5531e36 2024-11-22T15:22:59,598 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/9e4dac7b82874cd089286135f5531e36, entries=150, sequenceid=352, filesize=12.0 K 2024-11-22T15:22:59,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/40c9ead1e4e8472ab0f6cbb5322d3141 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/40c9ead1e4e8472ab0f6cbb5322d3141 2024-11-22T15:22:59,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-22T15:22:59,603 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/40c9ead1e4e8472ab0f6cbb5322d3141, entries=150, sequenceid=352, filesize=12.0 K 2024-11-22T15:22:59,604 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=13.42 KB/13740 for c30eef12e99d24ea8c4e5ace242daf20 in 953ms, sequenceid=352, compaction requested=false 2024-11-22T15:22:59,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2538): Flush status journal for c30eef12e99d24ea8c4e5ace242daf20: 2024-11-22T15:22:59,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 
2024-11-22T15:22:59,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=62 2024-11-22T15:22:59,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=62 2024-11-22T15:22:59,607 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61 2024-11-22T15:22:59,607 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1080 sec 2024-11-22T15:22:59,608 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees in 1.1120 sec 2024-11-22T15:22:59,869 DEBUG [master/77927f992d0b:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region e7d35e0ae1b576a64b6f8105b0d3681e changed from -1.0 to 0.0, refreshing cache 2024-11-22T15:23:00,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-22T15:23:00,603 INFO [Thread-789 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 61 completed 2024-11-22T15:23:01,823 DEBUG [Thread-783 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x328f994d to 127.0.0.1:52970 2024-11-22T15:23:01,823 DEBUG [Thread-783 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:23:01,866 DEBUG [Thread-787 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x769942d9 to 127.0.0.1:52970 2024-11-22T15:23:01,866 DEBUG [Thread-787 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:23:07,739 DEBUG [Thread-779 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x29458edd to 127.0.0.1:52970 2024-11-22T15:23:07,739 DEBUG [Thread-779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:23:07,739 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-22T15:23:07,739 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 39 2024-11-22T15:23:07,739 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 97 2024-11-22T15:23:07,739 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 20 2024-11-22T15:23:07,739 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 103 2024-11-22T15:23:07,739 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 20 2024-11-22T15:23:07,739 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-22T15:23:07,739 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3509 2024-11-22T15:23:07,739 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3574 2024-11-22T15:23:07,739 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-22T15:23:07,740 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1570 2024-11-22T15:23:07,740 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4710 rows 2024-11-22T15:23:07,740 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1563 2024-11-22T15:23:07,740 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4689 rows 2024-11-22T15:23:07,740 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-22T15:23:07,740 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x04ddf4c3 to 127.0.0.1:52970 2024-11-22T15:23:07,740 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:23:07,742 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-22T15:23:07,742 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-22T15:23:07,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-22T15:23:07,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-22T15:23:07,747 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732288987746"}]},"ts":"1732288987746"} 2024-11-22T15:23:07,748 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-22T15:23:07,765 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-22T15:23:07,766 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-22T15:23:07,768 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c30eef12e99d24ea8c4e5ace242daf20, UNASSIGN}] 2024-11-22T15:23:07,769 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure 
table=TestAcidGuarantees, region=c30eef12e99d24ea8c4e5ace242daf20, UNASSIGN 2024-11-22T15:23:07,770 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=c30eef12e99d24ea8c4e5ace242daf20, regionState=CLOSING, regionLocation=77927f992d0b,36033,1732288915809 2024-11-22T15:23:07,771 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-22T15:23:07,771 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE; CloseRegionProcedure c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809}] 2024-11-22T15:23:07,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-22T15:23:07,923 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:07,924 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] handler.UnassignRegionHandler(124): Close c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:23:07,924 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-22T15:23:07,924 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1681): Closing c30eef12e99d24ea8c4e5ace242daf20, disabling compactions & flushes 2024-11-22T15:23:07,924 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:23:07,924 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 2024-11-22T15:23:07,924 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. after waiting 0 ms 2024-11-22T15:23:07,924 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 
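The DisableTableProcedure chain above (pid=63 with CloseTableRegionsProcedure, TransitRegionStateProcedure and CloseRegionProcedure children) is the server side of the test's teardown. A minimal sketch of the client side, assuming the standard Admin call (the log only records "Started disable of TestAcidGuarantees"):

// Minimal sketch of the client-side call behind the DisableTableProcedure chain
// above; it is an assumption that teardown goes through the standard Admin API.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableSketch {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      if (!admin.isTableDisabled(tn)) {
        // Drives the master's DisableTableProcedure, which closes and unassigns
        // every region (the CLOSE_TABLE_REGIONS / UNASSIGN steps logged above).
        admin.disableTable(tn);
      }
    }
  }
}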
2024-11-22T15:23:07,925 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(2837): Flushing c30eef12e99d24ea8c4e5ace242daf20 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-22T15:23:07,925 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=A 2024-11-22T15:23:07,925 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:07,925 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=B 2024-11-22T15:23:07,926 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:07,926 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c30eef12e99d24ea8c4e5ace242daf20, store=C 2024-11-22T15:23:07,926 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:07,937 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122f939e7d463314423affca1c18497e606_c30eef12e99d24ea8c4e5ace242daf20 is 50, key is test_row_1/A:col10/1732288987738/Put/seqid=0 2024-11-22T15:23:07,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742088_1264 (size=9914) 2024-11-22T15:23:08,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-22T15:23:08,342 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:08,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-22T15:23:08,352 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122f939e7d463314423affca1c18497e606_c30eef12e99d24ea8c4e5ace242daf20 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122f939e7d463314423affca1c18497e606_c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:23:08,353 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/3fe7fa48df134d06a30db6106cca7987, store: [table=TestAcidGuarantees family=A region=c30eef12e99d24ea8c4e5ace242daf20] 2024-11-22T15:23:08,354 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/3fe7fa48df134d06a30db6106cca7987 is 175, key is test_row_1/A:col10/1732288987738/Put/seqid=0 2024-11-22T15:23:08,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742089_1265 (size=22561) 2024-11-22T15:23:08,761 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=361, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/3fe7fa48df134d06a30db6106cca7987 2024-11-22T15:23:08,773 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/49ded4a16f734362b58030c8f5f1f90f is 50, key is test_row_1/B:col10/1732288987738/Put/seqid=0 2024-11-22T15:23:08,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742090_1266 (size=9857) 2024-11-22T15:23:08,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-22T15:23:09,179 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=361 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/49ded4a16f734362b58030c8f5f1f90f 2024-11-22T15:23:09,193 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/c346a2c48e3f4d8e9337734b3e1e5d88 is 50, key is test_row_1/C:col10/1732288987738/Put/seqid=0 2024-11-22T15:23:09,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742091_1267 (size=9857) 2024-11-22T15:23:09,600 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=361 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/c346a2c48e3f4d8e9337734b3e1e5d88 2024-11-22T15:23:09,612 DEBUG 
[RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/A/3fe7fa48df134d06a30db6106cca7987 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/3fe7fa48df134d06a30db6106cca7987 2024-11-22T15:23:09,618 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/3fe7fa48df134d06a30db6106cca7987, entries=100, sequenceid=361, filesize=22.0 K 2024-11-22T15:23:09,619 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/B/49ded4a16f734362b58030c8f5f1f90f as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/49ded4a16f734362b58030c8f5f1f90f 2024-11-22T15:23:09,624 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/49ded4a16f734362b58030c8f5f1f90f, entries=100, sequenceid=361, filesize=9.6 K 2024-11-22T15:23:09,625 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/.tmp/C/c346a2c48e3f4d8e9337734b3e1e5d88 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/c346a2c48e3f4d8e9337734b3e1e5d88 2024-11-22T15:23:09,629 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/c346a2c48e3f4d8e9337734b3e1e5d88, entries=100, sequenceid=361, filesize=9.6 K 2024-11-22T15:23:09,629 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for c30eef12e99d24ea8c4e5ace242daf20 in 1705ms, sequenceid=361, compaction requested=true 2024-11-22T15:23:09,630 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/bbe688e57dfb4609a0a84b4a198dca57, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/5522076e51604a3e8659c5868d24d567, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/7eef65639afb4899a3b69efc41b1291e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/104bd25252c745ef9dfe0f7add7a12d3, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/2b970bb950294ddba529e21c160e436a, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/477acd5f1fd3424da03a1da7d9727f79, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/ba095335efa942b9b5d38d0ff5d20601, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/f6501987ec2d46a798d0481f92454973, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/730423c297854debaed2d150e43ba385, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/a805230534c34ba7bf1b71d5e5feb53e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/9db998a0c8af45fd9bbb44ff5296d0e5, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/65bb600c18554f95a697b34f2179e93e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/dfca9b4c51724b9593d798a270c89ca3, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/3c643a7fb711440ca19dd9d5fd93ad19, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/a041bb2dfab44c809685ad3ea54bc96b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/3c3f0bfa72f64f9eac6e83ebd6273b4d, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/7e3c2f7c690a4566aec9a62c36f3163f, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/5abda39d82f040478ede1b63ded2c199, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/0f0a46aa6a5e49ada6540515ec20511e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/9c7c39bd73734ade9def32ff8bf2066e, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/ab6520bc9706402ab26787e28015f652, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/254c3baf69624b08b539a2f3d23d09c1, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/cd0c2fa4f1b0499ba0651a86b4029af7] to archive 2024-11-22T15:23:09,631 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-22T15:23:09,633 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/bbe688e57dfb4609a0a84b4a198dca57 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/bbe688e57dfb4609a0a84b4a198dca57 2024-11-22T15:23:09,635 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/5522076e51604a3e8659c5868d24d567 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/5522076e51604a3e8659c5868d24d567 2024-11-22T15:23:09,637 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/7eef65639afb4899a3b69efc41b1291e to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/7eef65639afb4899a3b69efc41b1291e 2024-11-22T15:23:09,638 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/104bd25252c745ef9dfe0f7add7a12d3 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/104bd25252c745ef9dfe0f7add7a12d3 2024-11-22T15:23:09,640 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/2b970bb950294ddba529e21c160e436a to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/2b970bb950294ddba529e21c160e436a 2024-11-22T15:23:09,642 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/477acd5f1fd3424da03a1da7d9727f79 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/477acd5f1fd3424da03a1da7d9727f79 2024-11-22T15:23:09,643 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/ba095335efa942b9b5d38d0ff5d20601 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/ba095335efa942b9b5d38d0ff5d20601 2024-11-22T15:23:09,645 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/f6501987ec2d46a798d0481f92454973 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/f6501987ec2d46a798d0481f92454973 2024-11-22T15:23:09,647 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/730423c297854debaed2d150e43ba385 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/730423c297854debaed2d150e43ba385 2024-11-22T15:23:09,649 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/a805230534c34ba7bf1b71d5e5feb53e to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/a805230534c34ba7bf1b71d5e5feb53e 2024-11-22T15:23:09,651 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/9db998a0c8af45fd9bbb44ff5296d0e5 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/9db998a0c8af45fd9bbb44ff5296d0e5 2024-11-22T15:23:09,652 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/65bb600c18554f95a697b34f2179e93e to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/65bb600c18554f95a697b34f2179e93e 2024-11-22T15:23:09,654 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/dfca9b4c51724b9593d798a270c89ca3 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/dfca9b4c51724b9593d798a270c89ca3 2024-11-22T15:23:09,656 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/3c643a7fb711440ca19dd9d5fd93ad19 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/3c643a7fb711440ca19dd9d5fd93ad19 2024-11-22T15:23:09,658 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/a041bb2dfab44c809685ad3ea54bc96b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/a041bb2dfab44c809685ad3ea54bc96b 2024-11-22T15:23:09,660 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/3c3f0bfa72f64f9eac6e83ebd6273b4d to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/3c3f0bfa72f64f9eac6e83ebd6273b4d 2024-11-22T15:23:09,662 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/7e3c2f7c690a4566aec9a62c36f3163f to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/7e3c2f7c690a4566aec9a62c36f3163f 2024-11-22T15:23:09,664 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/5abda39d82f040478ede1b63ded2c199 to 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/5abda39d82f040478ede1b63ded2c199 2024-11-22T15:23:09,666 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/0f0a46aa6a5e49ada6540515ec20511e to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/0f0a46aa6a5e49ada6540515ec20511e 2024-11-22T15:23:09,668 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/9c7c39bd73734ade9def32ff8bf2066e to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/9c7c39bd73734ade9def32ff8bf2066e 2024-11-22T15:23:09,670 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/ab6520bc9706402ab26787e28015f652 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/ab6520bc9706402ab26787e28015f652 2024-11-22T15:23:09,671 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/254c3baf69624b08b539a2f3d23d09c1 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/254c3baf69624b08b539a2f3d23d09c1 2024-11-22T15:23:09,672 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/cd0c2fa4f1b0499ba0651a86b4029af7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/cd0c2fa4f1b0499ba0651a86b4029af7 2024-11-22T15:23:09,673 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/e16db852644540149b4cc2120ca0d78f, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/fdf803244299492db4cc0fed08af070e, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/c6e06b0d962049ca9a127b1b6e2b6925, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/bdffba8f9c4640b7add7dc79a55cbfed, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/4be0a8483c5941b18c60b8dfbe6c8ef8, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/44683ba1603a4af78a8114fffb424e07, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/88068f038c5a4b8b91a245f3b12a8434, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/d8adb163832a4619b99d39cd7335cd82, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/9234b8852b4946edab595e3e4cbd54e4, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/66c3ac0aeecb4292b3a6344c7fb7fbb6, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/6308d16ce77b4244a728032a5a2cf352, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/bd4d6d77076a447e8848a881a8195eae, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/473b6c69adf24b1993b3e44b1f6a040c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/634f8093cbed40ecbe2c41ed0de15a24, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/49c606cf47844c98b58d3db2ee0969ae, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/c6a1b74e079248238149a0ce291b5bdb, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/baed282e94cb4642889737ebd0e23cb0, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/180094bfb8134298a428b94fd489adda, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/2504e5828f1240079dce8f413878618b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/ac62b0a88c57420e94b79edd8b991463, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/46f2e6ceabcb48759432fa4009ba3e8e, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/481c7d0571534f19942fac17a9d0d3c3, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/e9443652b6b9499cbef183232357fadc] to archive 2024-11-22T15:23:09,674 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-22T15:23:09,676 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/e16db852644540149b4cc2120ca0d78f to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/e16db852644540149b4cc2120ca0d78f 2024-11-22T15:23:09,677 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/fdf803244299492db4cc0fed08af070e to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/fdf803244299492db4cc0fed08af070e 2024-11-22T15:23:09,677 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/c6e06b0d962049ca9a127b1b6e2b6925 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/c6e06b0d962049ca9a127b1b6e2b6925 2024-11-22T15:23:09,678 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/bdffba8f9c4640b7add7dc79a55cbfed to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/bdffba8f9c4640b7add7dc79a55cbfed 2024-11-22T15:23:09,679 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/4be0a8483c5941b18c60b8dfbe6c8ef8 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/4be0a8483c5941b18c60b8dfbe6c8ef8 2024-11-22T15:23:09,681 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/44683ba1603a4af78a8114fffb424e07 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/44683ba1603a4af78a8114fffb424e07 2024-11-22T15:23:09,682 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/88068f038c5a4b8b91a245f3b12a8434 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/88068f038c5a4b8b91a245f3b12a8434 2024-11-22T15:23:09,683 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/d8adb163832a4619b99d39cd7335cd82 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/d8adb163832a4619b99d39cd7335cd82 2024-11-22T15:23:09,684 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/9234b8852b4946edab595e3e4cbd54e4 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/9234b8852b4946edab595e3e4cbd54e4 2024-11-22T15:23:09,684 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/66c3ac0aeecb4292b3a6344c7fb7fbb6 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/66c3ac0aeecb4292b3a6344c7fb7fbb6 2024-11-22T15:23:09,685 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/6308d16ce77b4244a728032a5a2cf352 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/6308d16ce77b4244a728032a5a2cf352 2024-11-22T15:23:09,686 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/bd4d6d77076a447e8848a881a8195eae to 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/bd4d6d77076a447e8848a881a8195eae 2024-11-22T15:23:09,687 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/473b6c69adf24b1993b3e44b1f6a040c to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/473b6c69adf24b1993b3e44b1f6a040c 2024-11-22T15:23:09,688 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/634f8093cbed40ecbe2c41ed0de15a24 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/634f8093cbed40ecbe2c41ed0de15a24 2024-11-22T15:23:09,688 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/49c606cf47844c98b58d3db2ee0969ae to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/49c606cf47844c98b58d3db2ee0969ae 2024-11-22T15:23:09,689 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/c6a1b74e079248238149a0ce291b5bdb to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/c6a1b74e079248238149a0ce291b5bdb 2024-11-22T15:23:09,690 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/baed282e94cb4642889737ebd0e23cb0 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/baed282e94cb4642889737ebd0e23cb0 2024-11-22T15:23:09,691 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/180094bfb8134298a428b94fd489adda to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/180094bfb8134298a428b94fd489adda 2024-11-22T15:23:09,691 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/2504e5828f1240079dce8f413878618b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/2504e5828f1240079dce8f413878618b 2024-11-22T15:23:09,692 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/ac62b0a88c57420e94b79edd8b991463 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/ac62b0a88c57420e94b79edd8b991463 2024-11-22T15:23:09,693 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/46f2e6ceabcb48759432fa4009ba3e8e to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/46f2e6ceabcb48759432fa4009ba3e8e 2024-11-22T15:23:09,694 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/481c7d0571534f19942fac17a9d0d3c3 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/481c7d0571534f19942fac17a9d0d3c3 2024-11-22T15:23:09,694 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/e9443652b6b9499cbef183232357fadc to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/e9443652b6b9499cbef183232357fadc 2024-11-22T15:23:09,695 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/e5144371e8ef45b88889289ed515fdb7, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/66cbc3c1e2eb433991bacc269d007ce5, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/1c996d0ab45a4858a8972c5fdf44165d, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/7b2226af89f14170a912d72111904c51, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/ac28d40a110d454899960bb778952e7a, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/6cc2ebca5242437cbb848b89ff2cad78, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/8814a9375fee47e0a6c8d9abb0972bf5, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/991173ab1c9b4afbb5b6654a0a7fb270, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/2428507d4d3b44239c68b56f5a34593a, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/e1f7fd9908f74e8a85c9ce23f5cfe275, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/99405dcbd41a4d0da33d483fe8df1fe7, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/8baa17766bb447fbb3f0b71442724ce1, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/a65ec6b917d44c5da5d3f58d72710b7b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/12afbf0b2d284a54b924d50d50d9b828, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/7ce6abde3d0941f0ae2990d916e72f6f, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/ced24df96f754b2cb9798cf1436d73ae, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/d3daf610ec1c48c2b25b91a3c5b29955, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/d2beacd5edad40b3b634b6f3c45fc914, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/c128b190f15444a2beae969b73e10a23, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/7ffe380b84214e548e430144a1ed445b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/449425cd64894c79a932a8acb57ea797, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/6564a30fac184bcf8b0ff0b20e8e1829, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/29762fee1b3247e695cf9e857bd6b368] to archive 2024-11-22T15:23:09,696 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-22T15:23:09,697 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/e5144371e8ef45b88889289ed515fdb7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/e5144371e8ef45b88889289ed515fdb7 2024-11-22T15:23:09,698 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/66cbc3c1e2eb433991bacc269d007ce5 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/66cbc3c1e2eb433991bacc269d007ce5 2024-11-22T15:23:09,699 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/1c996d0ab45a4858a8972c5fdf44165d to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/1c996d0ab45a4858a8972c5fdf44165d 2024-11-22T15:23:09,700 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/7b2226af89f14170a912d72111904c51 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/7b2226af89f14170a912d72111904c51 2024-11-22T15:23:09,701 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/ac28d40a110d454899960bb778952e7a to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/ac28d40a110d454899960bb778952e7a 2024-11-22T15:23:09,701 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/6cc2ebca5242437cbb848b89ff2cad78 to 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/6cc2ebca5242437cbb848b89ff2cad78 2024-11-22T15:23:09,702 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/8814a9375fee47e0a6c8d9abb0972bf5 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/8814a9375fee47e0a6c8d9abb0972bf5 2024-11-22T15:23:09,703 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/991173ab1c9b4afbb5b6654a0a7fb270 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/991173ab1c9b4afbb5b6654a0a7fb270 2024-11-22T15:23:09,704 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/2428507d4d3b44239c68b56f5a34593a to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/2428507d4d3b44239c68b56f5a34593a 2024-11-22T15:23:09,704 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/e1f7fd9908f74e8a85c9ce23f5cfe275 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/e1f7fd9908f74e8a85c9ce23f5cfe275 2024-11-22T15:23:09,705 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/99405dcbd41a4d0da33d483fe8df1fe7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/99405dcbd41a4d0da33d483fe8df1fe7 2024-11-22T15:23:09,706 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/8baa17766bb447fbb3f0b71442724ce1 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/8baa17766bb447fbb3f0b71442724ce1 2024-11-22T15:23:09,707 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/a65ec6b917d44c5da5d3f58d72710b7b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/a65ec6b917d44c5da5d3f58d72710b7b 2024-11-22T15:23:09,707 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/12afbf0b2d284a54b924d50d50d9b828 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/12afbf0b2d284a54b924d50d50d9b828 2024-11-22T15:23:09,708 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/7ce6abde3d0941f0ae2990d916e72f6f to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/7ce6abde3d0941f0ae2990d916e72f6f 2024-11-22T15:23:09,709 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/ced24df96f754b2cb9798cf1436d73ae to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/ced24df96f754b2cb9798cf1436d73ae 2024-11-22T15:23:09,710 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/d3daf610ec1c48c2b25b91a3c5b29955 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/d3daf610ec1c48c2b25b91a3c5b29955 2024-11-22T15:23:09,710 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/d2beacd5edad40b3b634b6f3c45fc914 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/d2beacd5edad40b3b634b6f3c45fc914 2024-11-22T15:23:09,711 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/c128b190f15444a2beae969b73e10a23 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/c128b190f15444a2beae969b73e10a23 2024-11-22T15:23:09,712 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/7ffe380b84214e548e430144a1ed445b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/7ffe380b84214e548e430144a1ed445b 2024-11-22T15:23:09,712 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/449425cd64894c79a932a8acb57ea797 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/449425cd64894c79a932a8acb57ea797 2024-11-22T15:23:09,713 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/6564a30fac184bcf8b0ff0b20e8e1829 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/6564a30fac184bcf8b0ff0b20e8e1829 2024-11-22T15:23:09,714 DEBUG [StoreCloser-TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/29762fee1b3247e695cf9e857bd6b368 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/29762fee1b3247e695cf9e857bd6b368 2024-11-22T15:23:09,717 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/recovered.edits/364.seqid, newMaxSeqId=364, maxSeqId=4 2024-11-22T15:23:09,718 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20. 
2024-11-22T15:23:09,718 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1635): Region close journal for c30eef12e99d24ea8c4e5ace242daf20: 2024-11-22T15:23:09,719 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] handler.UnassignRegionHandler(170): Closed c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:23:09,720 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=c30eef12e99d24ea8c4e5ace242daf20, regionState=CLOSED 2024-11-22T15:23:09,721 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=65 2024-11-22T15:23:09,721 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=65, state=SUCCESS; CloseRegionProcedure c30eef12e99d24ea8c4e5ace242daf20, server=77927f992d0b,36033,1732288915809 in 1.9490 sec 2024-11-22T15:23:09,722 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=65, resume processing ppid=64 2024-11-22T15:23:09,722 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, ppid=64, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=c30eef12e99d24ea8c4e5ace242daf20, UNASSIGN in 1.9530 sec 2024-11-22T15:23:09,723 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=64, resume processing ppid=63 2024-11-22T15:23:09,723 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.9560 sec 2024-11-22T15:23:09,724 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732288989724"}]},"ts":"1732288989724"} 2024-11-22T15:23:09,724 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-22T15:23:09,769 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-22T15:23:09,771 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.0280 sec 2024-11-22T15:23:09,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-22T15:23:09,854 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 63 completed 2024-11-22T15:23:09,855 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-22T15:23:09,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=67, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T15:23:09,858 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=67, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T15:23:09,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-22T15:23:09,860 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for 
pid=67, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T15:23:09,862 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:23:09,864 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A, FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B, FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C, FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/recovered.edits] 2024-11-22T15:23:09,867 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/3fe7fa48df134d06a30db6106cca7987 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/3fe7fa48df134d06a30db6106cca7987 2024-11-22T15:23:09,868 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/4194d5db2e004f31a9bc93bc8bffea97 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/4194d5db2e004f31a9bc93bc8bffea97 2024-11-22T15:23:09,869 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/d496f6f0990f455d979afae980d6aacd to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/A/d496f6f0990f455d979afae980d6aacd 2024-11-22T15:23:09,872 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/49ded4a16f734362b58030c8f5f1f90f to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/49ded4a16f734362b58030c8f5f1f90f 2024-11-22T15:23:09,873 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/9e4dac7b82874cd089286135f5531e36 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/9e4dac7b82874cd089286135f5531e36 2024-11-22T15:23:09,874 
DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/c8d5ef5a4b4145acb5cf52479dac29a6 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/B/c8d5ef5a4b4145acb5cf52479dac29a6 2024-11-22T15:23:09,877 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/40c9ead1e4e8472ab0f6cbb5322d3141 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/40c9ead1e4e8472ab0f6cbb5322d3141 2024-11-22T15:23:09,877 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/755c3a8bb1d44bf3a5d58ee000cf8b52 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/755c3a8bb1d44bf3a5d58ee000cf8b52 2024-11-22T15:23:09,878 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/c346a2c48e3f4d8e9337734b3e1e5d88 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/C/c346a2c48e3f4d8e9337734b3e1e5d88 2024-11-22T15:23:09,880 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/recovered.edits/364.seqid to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20/recovered.edits/364.seqid 2024-11-22T15:23:09,881 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:23:09,881 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-22T15:23:09,881 DEBUG [PEWorker-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-22T15:23:09,882 DEBUG [PEWorker-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-22T15:23:09,884 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411222ec9260a2c6c40928ef6df5cf389b9de_c30eef12e99d24ea8c4e5ace242daf20 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411222ec9260a2c6c40928ef6df5cf389b9de_c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:23:09,885 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122436118fb2ebd45749bb20d52a2bd3cbd_c30eef12e99d24ea8c4e5ace242daf20 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122436118fb2ebd45749bb20d52a2bd3cbd_c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:23:09,886 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112248958d2a72ee48c5b3d6e83c8a955249_c30eef12e99d24ea8c4e5ace242daf20 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112248958d2a72ee48c5b3d6e83c8a955249_c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:23:09,887 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411227ff879c85f644c9288d652f8ea38418d_c30eef12e99d24ea8c4e5ace242daf20 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411227ff879c85f644c9288d652f8ea38418d_c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:23:09,888 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122859c866db31d45ec97ad3297bf947771_c30eef12e99d24ea8c4e5ace242daf20 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122859c866db31d45ec97ad3297bf947771_c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:23:09,890 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112291398ce2d3bf457f8eb4bbd6680e1029_c30eef12e99d24ea8c4e5ace242daf20 to 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112291398ce2d3bf457f8eb4bbd6680e1029_c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:23:09,891 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122a8397ecbfcc14154aa911a34fdddf7ca_c30eef12e99d24ea8c4e5ace242daf20 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122a8397ecbfcc14154aa911a34fdddf7ca_c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:23:09,892 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122abb3099f6d45438a8806a7304892f914_c30eef12e99d24ea8c4e5ace242daf20 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122abb3099f6d45438a8806a7304892f914_c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:23:09,893 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122ad689db6d84140cca091093694d58be5_c30eef12e99d24ea8c4e5ace242daf20 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122ad689db6d84140cca091093694d58be5_c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:23:09,895 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122b0bcb597a8e542e4be6feae65877c5d3_c30eef12e99d24ea8c4e5ace242daf20 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122b0bcb597a8e542e4be6feae65877c5d3_c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:23:09,895 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122bad5a3ad7ef743d39509f5363eaee9f2_c30eef12e99d24ea8c4e5ace242daf20 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122bad5a3ad7ef743d39509f5363eaee9f2_c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:23:09,896 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122bb8ec3c9752145c98fc173e5d4d52e7d_c30eef12e99d24ea8c4e5ace242daf20 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122bb8ec3c9752145c98fc173e5d4d52e7d_c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:23:09,897 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122ccb8186f2d5b45209d97e01152c41a6d_c30eef12e99d24ea8c4e5ace242daf20 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122ccb8186f2d5b45209d97e01152c41a6d_c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:23:09,898 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122ce3e3913927346bdb0a35470123b639f_c30eef12e99d24ea8c4e5ace242daf20 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122ce3e3913927346bdb0a35470123b639f_c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:23:09,899 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122d03c496966a04a6db87962ba36225466_c30eef12e99d24ea8c4e5ace242daf20 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122d03c496966a04a6db87962ba36225466_c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:23:09,900 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122e9259034197f4817a83ec34c132dfba4_c30eef12e99d24ea8c4e5ace242daf20 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122e9259034197f4817a83ec34c132dfba4_c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:23:09,901 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122f04756e0f59f40feb8d3b616b763e456_c30eef12e99d24ea8c4e5ace242daf20 to 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122f04756e0f59f40feb8d3b616b763e456_c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:23:09,901 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122f319f9a9a3bf4083aaa1f16475e0119d_c30eef12e99d24ea8c4e5ace242daf20 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122f319f9a9a3bf4083aaa1f16475e0119d_c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:23:09,902 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122f939e7d463314423affca1c18497e606_c30eef12e99d24ea8c4e5ace242daf20 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122f939e7d463314423affca1c18497e606_c30eef12e99d24ea8c4e5ace242daf20 2024-11-22T15:23:09,903 DEBUG [PEWorker-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-22T15:23:09,904 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=67, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T15:23:09,907 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-22T15:23:09,909 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-22T15:23:09,910 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=67, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T15:23:09,910 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-22T15:23:09,910 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732288989910"}]},"ts":"9223372036854775807"} 2024-11-22T15:23:09,912 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-22T15:23:09,912 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => c30eef12e99d24ea8c4e5ace242daf20, NAME => 'TestAcidGuarantees,,1732288955451.c30eef12e99d24ea8c4e5ace242daf20.', STARTKEY => '', ENDKEY => ''}] 2024-11-22T15:23:09,912 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-11-22T15:23:09,913 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732288989912"}]},"ts":"9223372036854775807"} 2024-11-22T15:23:09,914 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-22T15:23:09,925 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=67, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T15:23:09,926 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 70 msec 2024-11-22T15:23:09,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-22T15:23:09,961 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 67 completed 2024-11-22T15:23:09,968 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=237 (was 238), OpenFileDescriptor=449 (was 457), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=666 (was 653) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=4263 (was 4439) 2024-11-22T15:23:09,978 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=237, OpenFileDescriptor=449, MaxFileDescriptor=1048576, SystemLoadAverage=666, ProcessCount=11, AvailableMemoryMB=4262 2024-11-22T15:23:09,979 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-11-22T15:23:09,980 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T15:23:09,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=68, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-22T15:23:09,981 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T15:23:09,982 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:09,982 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T15:23:09,982 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 68 2024-11-22T15:23:09,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-11-22T15:23:09,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742092_1268 (size=963) 2024-11-22T15:23:10,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-11-22T15:23:10,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-11-22T15:23:10,392 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690 2024-11-22T15:23:10,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742093_1269 (size=53) 2024-11-22T15:23:10,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-11-22T15:23:10,801 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T15:23:10,801 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing db373da2a391c371f2b4fcae935e7eac, disabling compactions & flushes 2024-11-22T15:23:10,801 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:10,801 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:10,801 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. after waiting 0 ms 2024-11-22T15:23:10,801 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:10,801 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 
2024-11-22T15:23:10,801 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:10,802 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T15:23:10,803 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732288990802"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732288990802"}]},"ts":"1732288990802"} 2024-11-22T15:23:10,803 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-22T15:23:10,804 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T15:23:10,804 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732288990804"}]},"ts":"1732288990804"} 2024-11-22T15:23:10,805 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-22T15:23:10,877 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=db373da2a391c371f2b4fcae935e7eac, ASSIGN}] 2024-11-22T15:23:10,879 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=db373da2a391c371f2b4fcae935e7eac, ASSIGN 2024-11-22T15:23:10,880 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=db373da2a391c371f2b4fcae935e7eac, ASSIGN; state=OFFLINE, location=77927f992d0b,36033,1732288915809; forceNewPlan=false, retain=false 2024-11-22T15:23:11,031 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=69 updating hbase:meta row=db373da2a391c371f2b4fcae935e7eac, regionState=OPENING, regionLocation=77927f992d0b,36033,1732288915809 2024-11-22T15:23:11,032 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE; OpenRegionProcedure db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809}] 2024-11-22T15:23:11,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-11-22T15:23:11,184 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:11,187 INFO [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 
2024-11-22T15:23:11,187 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(7285): Opening region: {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} 2024-11-22T15:23:11,188 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees db373da2a391c371f2b4fcae935e7eac 2024-11-22T15:23:11,188 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T15:23:11,188 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(7327): checking encryption for db373da2a391c371f2b4fcae935e7eac 2024-11-22T15:23:11,188 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(7330): checking classloading for db373da2a391c371f2b4fcae935e7eac 2024-11-22T15:23:11,191 INFO [StoreOpener-db373da2a391c371f2b4fcae935e7eac-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region db373da2a391c371f2b4fcae935e7eac 2024-11-22T15:23:11,192 INFO [StoreOpener-db373da2a391c371f2b4fcae935e7eac-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T15:23:11,192 INFO [StoreOpener-db373da2a391c371f2b4fcae935e7eac-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region db373da2a391c371f2b4fcae935e7eac columnFamilyName A 2024-11-22T15:23:11,192 DEBUG [StoreOpener-db373da2a391c371f2b4fcae935e7eac-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:11,193 INFO [StoreOpener-db373da2a391c371f2b4fcae935e7eac-1 {}] regionserver.HStore(327): Store=db373da2a391c371f2b4fcae935e7eac/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T15:23:11,193 INFO [StoreOpener-db373da2a391c371f2b4fcae935e7eac-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region db373da2a391c371f2b4fcae935e7eac 2024-11-22T15:23:11,194 INFO [StoreOpener-db373da2a391c371f2b4fcae935e7eac-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T15:23:11,195 INFO [StoreOpener-db373da2a391c371f2b4fcae935e7eac-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region db373da2a391c371f2b4fcae935e7eac columnFamilyName B 2024-11-22T15:23:11,195 DEBUG [StoreOpener-db373da2a391c371f2b4fcae935e7eac-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:11,195 INFO [StoreOpener-db373da2a391c371f2b4fcae935e7eac-1 {}] regionserver.HStore(327): Store=db373da2a391c371f2b4fcae935e7eac/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T15:23:11,195 INFO [StoreOpener-db373da2a391c371f2b4fcae935e7eac-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region db373da2a391c371f2b4fcae935e7eac 2024-11-22T15:23:11,197 INFO [StoreOpener-db373da2a391c371f2b4fcae935e7eac-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T15:23:11,197 INFO [StoreOpener-db373da2a391c371f2b4fcae935e7eac-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region db373da2a391c371f2b4fcae935e7eac columnFamilyName C 2024-11-22T15:23:11,197 DEBUG [StoreOpener-db373da2a391c371f2b4fcae935e7eac-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:11,198 INFO [StoreOpener-db373da2a391c371f2b4fcae935e7eac-1 {}] regionserver.HStore(327): Store=db373da2a391c371f2b4fcae935e7eac/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T15:23:11,198 INFO [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:11,199 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac 2024-11-22T15:23:11,199 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac 2024-11-22T15:23:11,200 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T15:23:11,201 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1085): writing seq id for db373da2a391c371f2b4fcae935e7eac 2024-11-22T15:23:11,203 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T15:23:11,204 INFO [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1102): Opened db373da2a391c371f2b4fcae935e7eac; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72968833, jitterRate=0.08732034265995026}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T15:23:11,205 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1001): Region open journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:11,205 INFO [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac., pid=70, masterSystemTime=1732288991184 2024-11-22T15:23:11,207 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:11,207 INFO [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 
2024-11-22T15:23:11,207 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=69 updating hbase:meta row=db373da2a391c371f2b4fcae935e7eac, regionState=OPEN, openSeqNum=2, regionLocation=77927f992d0b,36033,1732288915809 2024-11-22T15:23:11,209 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=69 2024-11-22T15:23:11,209 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=69, state=SUCCESS; OpenRegionProcedure db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 in 176 msec 2024-11-22T15:23:11,211 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=69, resume processing ppid=68 2024-11-22T15:23:11,211 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, ppid=68, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=db373da2a391c371f2b4fcae935e7eac, ASSIGN in 332 msec 2024-11-22T15:23:11,211 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T15:23:11,212 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732288991211"}]},"ts":"1732288991211"} 2024-11-22T15:23:11,212 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-22T15:23:11,278 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T15:23:11,280 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2980 sec 2024-11-22T15:23:12,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-11-22T15:23:12,090 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 68 completed 2024-11-22T15:23:12,093 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7f66057f to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@53bfce45 2024-11-22T15:23:12,112 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64dc42d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:23:12,113 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:23:12,115 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46882, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:23:12,117 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-22T15:23:12,118 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48600, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-22T15:23:12,122 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c1ac389 to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@44645c55 2024-11-22T15:23:12,134 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@669e1999, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:23:12,136 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x028e73c0 to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@64ee0130 2024-11-22T15:23:12,152 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72aa9ee5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:23:12,154 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7c480dfb to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@683b64c3 2024-11-22T15:23:12,186 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ec09297, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:23:12,188 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x34cb3991 to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7e55eb7 2024-11-22T15:23:12,217 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4dfb20f6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:23:12,219 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2e9ae050 to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3a703d2 2024-11-22T15:23:12,236 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17cf7fc0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:23:12,238 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x14ed1e44 to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@78b04266 2024-11-22T15:23:12,255 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5886c0f2, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:23:12,256 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x72537a47 to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@88aa519 2024-11-22T15:23:12,282 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66e575aa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:23:12,284 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x036642cb to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5e998dd3 2024-11-22T15:23:12,323 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@131ceb8f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:23:12,325 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c299cfb to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2e4c79b8 2024-11-22T15:23:12,337 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a78bf6d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:23:12,338 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x605827c9 to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2d1403c3 2024-11-22T15:23:12,368 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@328852db, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:23:12,387 DEBUG [hconnection-0x1fe6947a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:23:12,388 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46886, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:23:12,401 DEBUG [hconnection-0x1bd73203-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:23:12,403 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46888, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:23:12,424 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:23:12,427 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees 2024-11-22T15:23:12,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-22T15:23:12,428 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:23:12,429 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:23:12,429 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:23:12,435 DEBUG [hconnection-0x100fe920-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:23:12,436 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46900, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:23:12,448 DEBUG [hconnection-0x1d28cffe-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:23:12,450 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46910, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:23:12,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on db373da2a391c371f2b4fcae935e7eac 2024-11-22T15:23:12,494 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing db373da2a391c371f2b4fcae935e7eac 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-22T15:23:12,495 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=A 2024-11-22T15:23:12,495 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:12,495 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=B 2024-11-22T15:23:12,495 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:12,495 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=C 2024-11-22T15:23:12,495 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:12,496 DEBUG [hconnection-0x7fe4df16-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:23:12,496 DEBUG [hconnection-0x5758b4ce-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:23:12,501 INFO [RS-EventLoopGroup-3-3 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46930, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:23:12,501 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46916, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:23:12,508 DEBUG [hconnection-0x3d7171d4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:23:12,510 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46934, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:23:12,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-22T15:23:12,531 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:12,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 3 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46934 deadline: 1732289052526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:12,533 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:12,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46900 deadline: 1732289052529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:12,534 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:12,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289052532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:12,535 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:12,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46916 deadline: 1732289052529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:12,536 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:12,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46910 deadline: 1732289052536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:12,550 DEBUG [hconnection-0x2992478c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:23:12,551 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46944, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:23:12,558 DEBUG [hconnection-0x4e2fab12-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:23:12,560 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46954, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:23:12,581 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:12,581 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-22T15:23:12,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:12,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:12,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:12,582 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:12,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:12,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:12,584 DEBUG [hconnection-0x7f1266f2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:23:12,585 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46958, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:23:12,592 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/d7e090bbcee240d9b31394f4c6758583 is 50, key is test_row_0/A:col10/1732288992489/Put/seqid=0 2024-11-22T15:23:12,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742094_1270 (size=12001) 2024-11-22T15:23:12,619 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/d7e090bbcee240d9b31394f4c6758583 2024-11-22T15:23:12,640 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:12,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46900 deadline: 1732289052635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:12,641 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:12,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46934 deadline: 1732289052635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:12,641 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:12,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289052636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:12,644 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:12,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46910 deadline: 1732289052637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:12,644 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:12,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46916 deadline: 1732289052637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:12,687 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/5bdf946b880e4c2e9b97872e4716abb4 is 50, key is test_row_0/B:col10/1732288992489/Put/seqid=0 2024-11-22T15:23:12,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-22T15:23:12,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742095_1271 (size=12001) 2024-11-22T15:23:12,734 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:12,735 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-22T15:23:12,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:12,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:12,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:12,735 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:12,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:12,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T15:23:12,739 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/5bdf946b880e4c2e9b97872e4716abb4
2024-11-22T15:23:12,784 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/f856d5026f53422baebe11109765cc0a is 50, key is test_row_0/C:col10/1732288992489/Put/seqid=0
2024-11-22T15:23:12,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742096_1272 (size=12001)
2024-11-22T15:23:12,808 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/f856d5026f53422baebe11109765cc0a
2024-11-22T15:23:12,830 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/d7e090bbcee240d9b31394f4c6758583 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/d7e090bbcee240d9b31394f4c6758583
2024-11-22T15:23:12,841 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/d7e090bbcee240d9b31394f4c6758583, entries=150, sequenceid=14, filesize=11.7 K
2024-11-22T15:23:12,842 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/5bdf946b880e4c2e9b97872e4716abb4 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/5bdf946b880e4c2e9b97872e4716abb4
2024-11-22T15:23:12,847 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/5bdf946b880e4c2e9b97872e4716abb4, entries=150, sequenceid=14, filesize=11.7 K
2024-11-22T15:23:12,848 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/f856d5026f53422baebe11109765cc0a as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/f856d5026f53422baebe11109765cc0a
2024-11-22T15:23:12,853 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/f856d5026f53422baebe11109765cc0a, entries=150, sequenceid=14, filesize=11.7 K
2024-11-22T15:23:12,854 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for db373da2a391c371f2b4fcae935e7eac in 361ms, sequenceid=14, compaction requested=false
2024-11-22T15:23:12,854 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees'
2024-11-22T15:23:12,855 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for db373da2a391c371f2b4fcae935e7eac:
2024-11-22T15:23:12,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on db373da2a391c371f2b4fcae935e7eac
2024-11-22T15:23:12,866 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing db373da2a391c371f2b4fcae935e7eac 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB
2024-11-22T15:23:12,867 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=A
2024-11-22T15:23:12,867 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-22T15:23:12,867 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=B
2024-11-22T15:23:12,867 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-22T15:23:12,867 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=C
2024-11-22T15:23:12,867 DEBUG [MemStoreFlusher.0 {}]
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:12,881 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/9db61d1cba43444ebdbe7ffee752a1c1 is 50, key is test_row_0/A:col10/1732288992865/Put/seqid=0 2024-11-22T15:23:12,888 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:12,888 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-22T15:23:12,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:12,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:12,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:12,888 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:12,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:12,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:12,893 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:12,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46900 deadline: 1732289052885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:12,894 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:12,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46934 deadline: 1732289052886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:12,894 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:12,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289052889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:12,896 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:12,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46916 deadline: 1732289052891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:12,897 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:12,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46910 deadline: 1732289052891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:12,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742097_1273 (size=12001) 2024-11-22T15:23:12,998 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:12,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46900 deadline: 1732289052995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:12,999 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:12,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289052997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:12,999 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:12,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46934 deadline: 1732289052997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:13,001 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:13,001 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:13,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46910 deadline: 1732289052998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:13,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46916 deadline: 1732289052997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:13,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-22T15:23:13,043 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:13,043 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-22T15:23:13,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:13,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:13,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:13,044 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:13,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:13,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:13,196 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:13,197 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-22T15:23:13,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:13,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:13,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:13,197 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:13,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:13,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:13,204 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:13,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46934 deadline: 1732289053201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:13,205 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:13,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46900 deadline: 1732289053201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:13,208 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:13,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46910 deadline: 1732289053203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:13,209 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:13,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46916 deadline: 1732289053203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:13,213 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:13,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289053209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:13,323 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/9db61d1cba43444ebdbe7ffee752a1c1 2024-11-22T15:23:13,350 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:13,351 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-22T15:23:13,351 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/4bc37beddd944264831d0930f87c787d is 50, key is test_row_0/B:col10/1732288992865/Put/seqid=0 2024-11-22T15:23:13,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:13,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:13,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:13,352 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:13,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:13,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:13,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742098_1274 (size=12001) 2024-11-22T15:23:13,385 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/4bc37beddd944264831d0930f87c787d 2024-11-22T15:23:13,409 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/e2fd80c6a29b4e33abfd8e0db5aa6085 is 50, key is test_row_0/C:col10/1732288992865/Put/seqid=0 2024-11-22T15:23:13,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742099_1275 (size=12001) 2024-11-22T15:23:13,485 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/e2fd80c6a29b4e33abfd8e0db5aa6085 2024-11-22T15:23:13,492 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/9db61d1cba43444ebdbe7ffee752a1c1 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/9db61d1cba43444ebdbe7ffee752a1c1 2024-11-22T15:23:13,500 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/9db61d1cba43444ebdbe7ffee752a1c1, entries=150, sequenceid=39, filesize=11.7 K 2024-11-22T15:23:13,501 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/4bc37beddd944264831d0930f87c787d as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/4bc37beddd944264831d0930f87c787d 2024-11-22T15:23:13,508 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:13,508 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/4bc37beddd944264831d0930f87c787d, entries=150, sequenceid=39, filesize=11.7 K 2024-11-22T15:23:13,508 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-22T15:23:13,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:13,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:13,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:13,509 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:23:13,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:13,509 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:13,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46934 deadline: 1732289053506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:13,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:23:13,511 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/e2fd80c6a29b4e33abfd8e0db5aa6085 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/e2fd80c6a29b4e33abfd8e0db5aa6085 2024-11-22T15:23:13,512 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:13,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46900 deadline: 1732289053509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:13,515 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:13,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46910 deadline: 1732289053510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:13,518 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:13,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46916 deadline: 1732289053516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:13,519 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/e2fd80c6a29b4e33abfd8e0db5aa6085, entries=150, sequenceid=39, filesize=11.7 K 2024-11-22T15:23:13,520 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for db373da2a391c371f2b4fcae935e7eac in 654ms, sequenceid=39, compaction requested=false 2024-11-22T15:23:13,520 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:13,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on db373da2a391c371f2b4fcae935e7eac 2024-11-22T15:23:13,524 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing db373da2a391c371f2b4fcae935e7eac 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-22T15:23:13,528 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=A 2024-11-22T15:23:13,528 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:13,528 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=B 2024-11-22T15:23:13,528 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:13,528 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=C 2024-11-22T15:23:13,528 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:13,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-22T15:23:13,535 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/51aceac83fa840b68e231d5a7c4402a0 is 50, key is test_row_0/A:col10/1732288993520/Put/seqid=0 2024-11-22T15:23:13,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742100_1276 (size=12001) 2024-11-22T15:23:13,577 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/51aceac83fa840b68e231d5a7c4402a0 2024-11-22T15:23:13,589 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/3df2f4feb3a540f284640e17566e3e77 is 50, key is test_row_0/B:col10/1732288993520/Put/seqid=0 2024-11-22T15:23:13,622 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:13,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289053619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:13,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742101_1277 (size=12001) 2024-11-22T15:23:13,638 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/3df2f4feb3a540f284640e17566e3e77 2024-11-22T15:23:13,660 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/d7f36ff1744e4fbead358a37bcc3409a is 50, key is 
test_row_0/C:col10/1732288993520/Put/seqid=0 2024-11-22T15:23:13,661 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:13,662 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-22T15:23:13,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:13,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:13,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:13,662 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:13,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:23:13,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:23:13,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742102_1278 (size=12001) 2024-11-22T15:23:13,701 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/d7f36ff1744e4fbead358a37bcc3409a 2024-11-22T15:23:13,709 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/51aceac83fa840b68e231d5a7c4402a0 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/51aceac83fa840b68e231d5a7c4402a0 2024-11-22T15:23:13,724 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/51aceac83fa840b68e231d5a7c4402a0, entries=150, sequenceid=52, filesize=11.7 K 2024-11-22T15:23:13,727 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/3df2f4feb3a540f284640e17566e3e77 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/3df2f4feb3a540f284640e17566e3e77 2024-11-22T15:23:13,728 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:13,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289053725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:13,734 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/3df2f4feb3a540f284640e17566e3e77, entries=150, sequenceid=52, filesize=11.7 K 2024-11-22T15:23:13,736 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/d7f36ff1744e4fbead358a37bcc3409a as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/d7f36ff1744e4fbead358a37bcc3409a 2024-11-22T15:23:13,744 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/d7f36ff1744e4fbead358a37bcc3409a, entries=150, sequenceid=52, filesize=11.7 K 2024-11-22T15:23:13,744 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for db373da2a391c371f2b4fcae935e7eac in 220ms, sequenceid=52, compaction requested=true 2024-11-22T15:23:13,745 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:13,745 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store db373da2a391c371f2b4fcae935e7eac:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:23:13,745 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:13,745 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:23:13,745 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store db373da2a391c371f2b4fcae935e7eac:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:23:13,745 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), 
splitQueue=0 2024-11-22T15:23:13,745 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:23:13,745 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store db373da2a391c371f2b4fcae935e7eac:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:23:13,745 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:23:13,746 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:23:13,746 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): db373da2a391c371f2b4fcae935e7eac/B is initiating minor compaction (all files) 2024-11-22T15:23:13,747 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of db373da2a391c371f2b4fcae935e7eac/B in TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:13,747 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/5bdf946b880e4c2e9b97872e4716abb4, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/4bc37beddd944264831d0930f87c787d, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/3df2f4feb3a540f284640e17566e3e77] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp, totalSize=35.2 K 2024-11-22T15:23:13,747 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:23:13,747 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): db373da2a391c371f2b4fcae935e7eac/A is initiating minor compaction (all files) 2024-11-22T15:23:13,748 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of db373da2a391c371f2b4fcae935e7eac/A in TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 
2024-11-22T15:23:13,748 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/d7e090bbcee240d9b31394f4c6758583, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/9db61d1cba43444ebdbe7ffee752a1c1, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/51aceac83fa840b68e231d5a7c4402a0] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp, totalSize=35.2 K 2024-11-22T15:23:13,748 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting d7e090bbcee240d9b31394f4c6758583, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1732288992489 2024-11-22T15:23:13,748 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 5bdf946b880e4c2e9b97872e4716abb4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1732288992489 2024-11-22T15:23:13,750 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9db61d1cba43444ebdbe7ffee752a1c1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732288992504 2024-11-22T15:23:13,750 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 4bc37beddd944264831d0930f87c787d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732288992504 2024-11-22T15:23:13,750 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 51aceac83fa840b68e231d5a7c4402a0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732288992886 2024-11-22T15:23:13,751 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 3df2f4feb3a540f284640e17566e3e77, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732288992886 2024-11-22T15:23:13,770 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): db373da2a391c371f2b4fcae935e7eac#A#compaction#233 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:13,770 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/12ea23a3a3f9461e90a558c26caf9080 is 50, key is test_row_0/A:col10/1732288993520/Put/seqid=0 2024-11-22T15:23:13,776 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): db373da2a391c371f2b4fcae935e7eac#B#compaction#234 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:13,777 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/8a62b9222dd841e5a94ac0db45cc4f1b is 50, key is test_row_0/B:col10/1732288993520/Put/seqid=0 2024-11-22T15:23:13,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742103_1279 (size=12104) 2024-11-22T15:23:13,815 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:13,816 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/12ea23a3a3f9461e90a558c26caf9080 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/12ea23a3a3f9461e90a558c26caf9080 2024-11-22T15:23:13,816 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-22T15:23:13,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:13,816 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing db373da2a391c371f2b4fcae935e7eac 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-22T15:23:13,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=A 2024-11-22T15:23:13,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:13,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=B 2024-11-22T15:23:13,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:13,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=C 2024-11-22T15:23:13,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:13,823 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in db373da2a391c371f2b4fcae935e7eac/A of db373da2a391c371f2b4fcae935e7eac into 12ea23a3a3f9461e90a558c26caf9080(size=11.8 K), 
total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:23:13,823 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:13,823 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac., storeName=db373da2a391c371f2b4fcae935e7eac/A, priority=13, startTime=1732288993745; duration=0sec 2024-11-22T15:23:13,824 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:23:13,824 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: db373da2a391c371f2b4fcae935e7eac:A 2024-11-22T15:23:13,824 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:23:13,825 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:23:13,825 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): db373da2a391c371f2b4fcae935e7eac/C is initiating minor compaction (all files) 2024-11-22T15:23:13,825 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of db373da2a391c371f2b4fcae935e7eac/C in TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 
2024-11-22T15:23:13,825 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/f856d5026f53422baebe11109765cc0a, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/e2fd80c6a29b4e33abfd8e0db5aa6085, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/d7f36ff1744e4fbead358a37bcc3409a] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp, totalSize=35.2 K 2024-11-22T15:23:13,825 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting f856d5026f53422baebe11109765cc0a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1732288992489 2024-11-22T15:23:13,826 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting e2fd80c6a29b4e33abfd8e0db5aa6085, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732288992504 2024-11-22T15:23:13,828 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting d7f36ff1744e4fbead358a37bcc3409a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732288992886 2024-11-22T15:23:13,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742104_1280 (size=12104) 2024-11-22T15:23:13,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/b7b9b93f13c64126a9785ff3ccada160 is 50, key is test_row_0/A:col10/1732288993618/Put/seqid=0 2024-11-22T15:23:13,883 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): db373da2a391c371f2b4fcae935e7eac#C#compaction#236 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:13,884 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/357ba40cf59b4f8fb5769faba365c32f is 50, key is test_row_0/C:col10/1732288993520/Put/seqid=0 2024-11-22T15:23:13,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742105_1281 (size=12001) 2024-11-22T15:23:13,921 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/b7b9b93f13c64126a9785ff3ccada160 2024-11-22T15:23:13,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742106_1282 (size=12104) 2024-11-22T15:23:13,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/ae6a79a0abda48b4879571aa413e3737 is 50, key is test_row_0/B:col10/1732288993618/Put/seqid=0 2024-11-22T15:23:13,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on db373da2a391c371f2b4fcae935e7eac 2024-11-22T15:23:13,935 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:13,937 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/357ba40cf59b4f8fb5769faba365c32f as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/357ba40cf59b4f8fb5769faba365c32f 2024-11-22T15:23:13,946 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in db373da2a391c371f2b4fcae935e7eac/C of db373da2a391c371f2b4fcae935e7eac into 357ba40cf59b4f8fb5769faba365c32f(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:23:13,946 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:13,946 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac., storeName=db373da2a391c371f2b4fcae935e7eac/C, priority=13, startTime=1732288993745; duration=0sec 2024-11-22T15:23:13,946 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:13,946 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: db373da2a391c371f2b4fcae935e7eac:C 2024-11-22T15:23:13,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742107_1283 (size=12001) 2024-11-22T15:23:13,973 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/ae6a79a0abda48b4879571aa413e3737 2024-11-22T15:23:13,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/b314895745da455182573f930aa026d1 is 50, key is test_row_0/C:col10/1732288993618/Put/seqid=0 2024-11-22T15:23:14,003 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:14,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289054000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:14,017 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:14,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46934 deadline: 1732289054014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:14,018 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:14,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46900 deadline: 1732289054017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:14,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742108_1284 (size=12001) 2024-11-22T15:23:14,022 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:14,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46910 deadline: 1732289054022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:14,030 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:14,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46916 deadline: 1732289054028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:14,105 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:14,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289054104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:14,259 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/8a62b9222dd841e5a94ac0db45cc4f1b as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/8a62b9222dd841e5a94ac0db45cc4f1b 2024-11-22T15:23:14,267 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in db373da2a391c371f2b4fcae935e7eac/B of db373da2a391c371f2b4fcae935e7eac into 8a62b9222dd841e5a94ac0db45cc4f1b(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:23:14,267 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:14,267 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac., storeName=db373da2a391c371f2b4fcae935e7eac/B, priority=13, startTime=1732288993745; duration=0sec 2024-11-22T15:23:14,267 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:14,267 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: db373da2a391c371f2b4fcae935e7eac:B 2024-11-22T15:23:14,307 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:14,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289054306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:14,420 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/b314895745da455182573f930aa026d1 2024-11-22T15:23:14,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/b7b9b93f13c64126a9785ff3ccada160 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/b7b9b93f13c64126a9785ff3ccada160 2024-11-22T15:23:14,440 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/b7b9b93f13c64126a9785ff3ccada160, entries=150, sequenceid=75, filesize=11.7 K 2024-11-22T15:23:14,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/ae6a79a0abda48b4879571aa413e3737 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/ae6a79a0abda48b4879571aa413e3737 2024-11-22T15:23:14,449 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/ae6a79a0abda48b4879571aa413e3737, entries=150, sequenceid=75, filesize=11.7 K 2024-11-22T15:23:14,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/b314895745da455182573f930aa026d1 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/b314895745da455182573f930aa026d1 2024-11-22T15:23:14,457 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/b314895745da455182573f930aa026d1, entries=150, sequenceid=75, filesize=11.7 K 2024-11-22T15:23:14,458 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for db373da2a391c371f2b4fcae935e7eac in 642ms, sequenceid=75, compaction requested=false 2024-11-22T15:23:14,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:14,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 
2024-11-22T15:23:14,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-11-22T15:23:14,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=72 2024-11-22T15:23:14,461 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71 2024-11-22T15:23:14,461 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0300 sec 2024-11-22T15:23:14,470 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees in 2.0380 sec 2024-11-22T15:23:14,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-22T15:23:14,532 INFO [Thread-1241 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 71 completed 2024-11-22T15:23:14,534 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:23:14,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees 2024-11-22T15:23:14,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-22T15:23:14,535 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:23:14,536 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:23:14,536 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:23:14,614 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing db373da2a391c371f2b4fcae935e7eac 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-22T15:23:14,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=A 2024-11-22T15:23:14,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:14,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=B 2024-11-22T15:23:14,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:14,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=C 2024-11-22T15:23:14,614 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:14,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on db373da2a391c371f2b4fcae935e7eac 2024-11-22T15:23:14,632 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/1a5f07f32de74bc3b1fc17b1c80b4623 is 50, key is test_row_0/A:col10/1732288994612/Put/seqid=0 2024-11-22T15:23:14,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-22T15:23:14,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742109_1285 (size=12001) 2024-11-22T15:23:14,688 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:14,688 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-22T15:23:14,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:14,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:14,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:14,689 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:23:14,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:14,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:14,726 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:14,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289054725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:14,827 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:14,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289054827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:14,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-22T15:23:14,840 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:14,841 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-22T15:23:14,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:14,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:14,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:14,842 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:23:14,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:14,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:14,994 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:14,995 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-22T15:23:14,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:14,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:14,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:14,996 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:14,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:14,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:15,024 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:15,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46934 deadline: 1732289055022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:15,027 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:15,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46910 deadline: 1732289055026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:15,027 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:15,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46900 deadline: 1732289055026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:15,033 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:15,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289055029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:15,037 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:15,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46916 deadline: 1732289055034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:15,081 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/1a5f07f32de74bc3b1fc17b1c80b4623 2024-11-22T15:23:15,096 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/c6e2361ddda8489781fd9a2acc85751e is 50, key is test_row_0/B:col10/1732288994612/Put/seqid=0 2024-11-22T15:23:15,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742110_1286 (size=12001) 2024-11-22T15:23:15,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-22T15:23:15,150 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:15,151 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-22T15:23:15,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 
2024-11-22T15:23:15,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:15,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:15,151 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:15,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:23:15,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:15,277 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-22T15:23:15,303 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:15,304 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-22T15:23:15,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 
2024-11-22T15:23:15,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:15,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:15,304 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:15,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:23:15,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:15,338 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:15,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289055336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:15,457 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:15,457 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-22T15:23:15,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:15,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:15,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:15,457 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:23:15,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:15,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:15,531 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/c6e2361ddda8489781fd9a2acc85751e 2024-11-22T15:23:15,545 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/cc90788a998d4dd18bd6e7bdb2a45bbb is 50, key is test_row_0/C:col10/1732288994612/Put/seqid=0 2024-11-22T15:23:15,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742111_1287 (size=12001) 2024-11-22T15:23:15,584 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/cc90788a998d4dd18bd6e7bdb2a45bbb 2024-11-22T15:23:15,592 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/1a5f07f32de74bc3b1fc17b1c80b4623 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/1a5f07f32de74bc3b1fc17b1c80b4623 2024-11-22T15:23:15,600 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/1a5f07f32de74bc3b1fc17b1c80b4623, entries=150, sequenceid=92, filesize=11.7 K 2024-11-22T15:23:15,601 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/c6e2361ddda8489781fd9a2acc85751e as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/c6e2361ddda8489781fd9a2acc85751e 2024-11-22T15:23:15,607 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/c6e2361ddda8489781fd9a2acc85751e, entries=150, sequenceid=92, filesize=11.7 K 2024-11-22T15:23:15,609 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:15,609 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-22T15:23:15,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 
{event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:15,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:15,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:15,610 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:15,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:23:15,610 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/cc90788a998d4dd18bd6e7bdb2a45bbb as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/cc90788a998d4dd18bd6e7bdb2a45bbb 2024-11-22T15:23:15,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:23:15,620 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/cc90788a998d4dd18bd6e7bdb2a45bbb, entries=150, sequenceid=92, filesize=11.7 K 2024-11-22T15:23:15,622 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for db373da2a391c371f2b4fcae935e7eac in 1007ms, sequenceid=92, compaction requested=true 2024-11-22T15:23:15,622 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:15,622 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:23:15,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store db373da2a391c371f2b4fcae935e7eac:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:23:15,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:15,622 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:23:15,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store db373da2a391c371f2b4fcae935e7eac:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:23:15,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:15,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store db373da2a391c371f2b4fcae935e7eac:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:23:15,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:23:15,623 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:23:15,623 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): db373da2a391c371f2b4fcae935e7eac/A is initiating minor compaction (all files) 2024-11-22T15:23:15,623 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of db373da2a391c371f2b4fcae935e7eac/A in TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 
2024-11-22T15:23:15,623 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/12ea23a3a3f9461e90a558c26caf9080, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/b7b9b93f13c64126a9785ff3ccada160, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/1a5f07f32de74bc3b1fc17b1c80b4623] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp, totalSize=35.3 K 2024-11-22T15:23:15,624 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:23:15,624 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 12ea23a3a3f9461e90a558c26caf9080, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732288992886 2024-11-22T15:23:15,624 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): db373da2a391c371f2b4fcae935e7eac/B is initiating minor compaction (all files) 2024-11-22T15:23:15,624 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of db373da2a391c371f2b4fcae935e7eac/B in TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 
2024-11-22T15:23:15,624 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/8a62b9222dd841e5a94ac0db45cc4f1b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/ae6a79a0abda48b4879571aa413e3737, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/c6e2361ddda8489781fd9a2acc85751e] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp, totalSize=35.3 K 2024-11-22T15:23:15,624 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 8a62b9222dd841e5a94ac0db45cc4f1b, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732288992886 2024-11-22T15:23:15,624 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting b7b9b93f13c64126a9785ff3ccada160, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732288993607 2024-11-22T15:23:15,625 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting ae6a79a0abda48b4879571aa413e3737, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732288993607 2024-11-22T15:23:15,625 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1a5f07f32de74bc3b1fc17b1c80b4623, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732288993969 2024-11-22T15:23:15,625 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting c6e2361ddda8489781fd9a2acc85751e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732288993969 2024-11-22T15:23:15,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-22T15:23:15,644 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): db373da2a391c371f2b4fcae935e7eac#A#compaction#242 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:15,644 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/b2041c95b2704383b2453f31ac3f1feb is 50, key is test_row_0/A:col10/1732288994612/Put/seqid=0 2024-11-22T15:23:15,654 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): db373da2a391c371f2b4fcae935e7eac#B#compaction#243 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:15,654 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/f1c8366eab854df4ba34bfff12b3356c is 50, key is test_row_0/B:col10/1732288994612/Put/seqid=0 2024-11-22T15:23:15,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742112_1288 (size=12207) 2024-11-22T15:23:15,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742113_1289 (size=12207) 2024-11-22T15:23:15,712 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/b2041c95b2704383b2453f31ac3f1feb as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/b2041c95b2704383b2453f31ac3f1feb 2024-11-22T15:23:15,723 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in db373da2a391c371f2b4fcae935e7eac/A of db373da2a391c371f2b4fcae935e7eac into b2041c95b2704383b2453f31ac3f1feb(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:23:15,723 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:15,723 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac., storeName=db373da2a391c371f2b4fcae935e7eac/A, priority=13, startTime=1732288995622; duration=0sec 2024-11-22T15:23:15,723 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:23:15,723 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: db373da2a391c371f2b4fcae935e7eac:A 2024-11-22T15:23:15,723 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:23:15,725 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:23:15,725 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): db373da2a391c371f2b4fcae935e7eac/C is initiating minor compaction (all files) 2024-11-22T15:23:15,725 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of db373da2a391c371f2b4fcae935e7eac/C in TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 
2024-11-22T15:23:15,725 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/357ba40cf59b4f8fb5769faba365c32f, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/b314895745da455182573f930aa026d1, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/cc90788a998d4dd18bd6e7bdb2a45bbb] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp, totalSize=35.3 K 2024-11-22T15:23:15,725 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 357ba40cf59b4f8fb5769faba365c32f, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732288992886 2024-11-22T15:23:15,727 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting b314895745da455182573f930aa026d1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732288993607 2024-11-22T15:23:15,728 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting cc90788a998d4dd18bd6e7bdb2a45bbb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732288993969 2024-11-22T15:23:15,732 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/f1c8366eab854df4ba34bfff12b3356c as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/f1c8366eab854df4ba34bfff12b3356c 2024-11-22T15:23:15,739 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in db373da2a391c371f2b4fcae935e7eac/B of db373da2a391c371f2b4fcae935e7eac into f1c8366eab854df4ba34bfff12b3356c(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:23:15,739 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:15,739 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac., storeName=db373da2a391c371f2b4fcae935e7eac/B, priority=13, startTime=1732288995622; duration=0sec 2024-11-22T15:23:15,739 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:15,739 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: db373da2a391c371f2b4fcae935e7eac:B 2024-11-22T15:23:15,740 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): db373da2a391c371f2b4fcae935e7eac#C#compaction#244 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:15,740 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/35d73175da404be2a0c8484e4712f647 is 50, key is test_row_0/C:col10/1732288994612/Put/seqid=0 2024-11-22T15:23:15,762 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:15,762 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-22T15:23:15,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 
2024-11-22T15:23:15,762 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2837): Flushing db373da2a391c371f2b4fcae935e7eac 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-22T15:23:15,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=A 2024-11-22T15:23:15,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:15,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=B 2024-11-22T15:23:15,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:15,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=C 2024-11-22T15:23:15,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:15,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/97ad3f531c7746b6b1ebf507a7ca3c9b is 50, key is test_row_0/A:col10/1732288994724/Put/seqid=0 2024-11-22T15:23:15,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742114_1290 (size=12207) 2024-11-22T15:23:15,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742115_1291 (size=12001) 2024-11-22T15:23:15,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on db373da2a391c371f2b4fcae935e7eac 2024-11-22T15:23:15,852 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 
as already flushing 2024-11-22T15:23:15,853 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/97ad3f531c7746b6b1ebf507a7ca3c9b 2024-11-22T15:23:15,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/f1869d1bea6f47caa0b84c4abb43a0a1 is 50, key is test_row_0/B:col10/1732288994724/Put/seqid=0 2024-11-22T15:23:15,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742116_1292 (size=12001) 2024-11-22T15:23:15,932 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/f1869d1bea6f47caa0b84c4abb43a0a1 2024-11-22T15:23:15,942 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:15,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289055939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:15,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/8a94163c11ce448495dd4e1837d99b28 is 50, key is test_row_0/C:col10/1732288994724/Put/seqid=0 2024-11-22T15:23:15,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742117_1293 (size=12001) 2024-11-22T15:23:16,046 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:16,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289056044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:16,223 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/35d73175da404be2a0c8484e4712f647 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/35d73175da404be2a0c8484e4712f647 2024-11-22T15:23:16,237 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in db373da2a391c371f2b4fcae935e7eac/C of db373da2a391c371f2b4fcae935e7eac into 35d73175da404be2a0c8484e4712f647(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:23:16,238 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:16,238 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac., storeName=db373da2a391c371f2b4fcae935e7eac/C, priority=13, startTime=1732288995623; duration=0sec 2024-11-22T15:23:16,238 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:16,238 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: db373da2a391c371f2b4fcae935e7eac:C 2024-11-22T15:23:16,249 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:16,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289056248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:16,377 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/8a94163c11ce448495dd4e1837d99b28 2024-11-22T15:23:16,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/97ad3f531c7746b6b1ebf507a7ca3c9b as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/97ad3f531c7746b6b1ebf507a7ca3c9b 2024-11-22T15:23:16,399 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/97ad3f531c7746b6b1ebf507a7ca3c9b, entries=150, sequenceid=116, filesize=11.7 K 2024-11-22T15:23:16,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/f1869d1bea6f47caa0b84c4abb43a0a1 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/f1869d1bea6f47caa0b84c4abb43a0a1 2024-11-22T15:23:16,406 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/f1869d1bea6f47caa0b84c4abb43a0a1, entries=150, sequenceid=116, filesize=11.7 K 2024-11-22T15:23:16,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/8a94163c11ce448495dd4e1837d99b28 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/8a94163c11ce448495dd4e1837d99b28 2024-11-22T15:23:16,411 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/8a94163c11ce448495dd4e1837d99b28, entries=150, sequenceid=116, filesize=11.7 K 2024-11-22T15:23:16,412 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for db373da2a391c371f2b4fcae935e7eac in 650ms, sequenceid=116, compaction requested=false 2024-11-22T15:23:16,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2538): Flush status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:16,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:16,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=74 2024-11-22T15:23:16,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=74 2024-11-22T15:23:16,416 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73 2024-11-22T15:23:16,416 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8780 sec 2024-11-22T15:23:16,417 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees in 1.8830 sec 2024-11-22T15:23:16,558 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing db373da2a391c371f2b4fcae935e7eac 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-22T15:23:16,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=A 2024-11-22T15:23:16,559 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:16,559 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=B 2024-11-22T15:23:16,559 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:16,559 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=C 2024-11-22T15:23:16,559 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
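The flush that just completed (pid=74 under FlushTableProcedure pid=73) was driven by a client flush request. A minimal sketch of issuing such a flush through the public Admin API; the table name is taken from the log, everything else (connection setup, main method) is assumed for illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Flush all regions of the table; on the master this runs as a
      // FlushTableProcedure with one FlushRegionProcedure per region,
      // as seen in the pid=73/74 records above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}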
2024-11-22T15:23:16,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on db373da2a391c371f2b4fcae935e7eac 2024-11-22T15:23:16,564 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/92f85a5613db44a18bef6cfa6d9ba96e is 50, key is test_row_1/A:col10/1732288995919/Put/seqid=0 2024-11-22T15:23:16,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742118_1294 (size=9707) 2024-11-22T15:23:16,607 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/92f85a5613db44a18bef6cfa6d9ba96e 2024-11-22T15:23:16,620 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/7710d4c63d5c428486b92ce26de7ca5f is 50, key is test_row_1/B:col10/1732288995919/Put/seqid=0 2024-11-22T15:23:16,631 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:16,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289056628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:16,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-22T15:23:16,640 INFO [Thread-1241 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 73 completed 2024-11-22T15:23:16,642 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:23:16,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees 2024-11-22T15:23:16,643 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:23:16,644 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:23:16,644 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:23:16,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-22T15:23:16,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742119_1295 (size=9707) 2024-11-22T15:23:16,737 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:16,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289056735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:16,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-22T15:23:16,796 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:16,797 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-22T15:23:16,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:16,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:16,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:16,797 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:16,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:16,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:16,940 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:16,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289056939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:16,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-22T15:23:16,949 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:16,950 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-22T15:23:16,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 
2024-11-22T15:23:16,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:16,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:16,950 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:16,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
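The repeated RegionTooBusyException warnings in this stretch mean writers are being pushed back while the region's memstore is over its limit; the client side (RpcRetryingCallerImpl, "tries=6, retries=16") retries automatically. A rough sketch of what such a writer looks like, with row, families, and qualifier taken from the log and the retry settings assumed rather than read from this test:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Client-side retry budget; the log shows 16 retries in use for this workload.
    conf.setInt("hbase.client.retries.number", 16);
    conf.setLong("hbase.client.pause", 100); // base pause in ms; backoff applies on top
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      try {
        // Retried internally by the client; only fails once the retry budget is spent.
        table.put(put);
      } catch (IOException e) {
        // In this workload the root cause is RegionTooBusyException (memstore over
        // limit); an application would back off here before submitting more writes.
      }
    }
  }
}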
2024-11-22T15:23:16,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:17,037 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:17,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46934 deadline: 1732289057036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:17,038 DEBUG [Thread-1237 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4152 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac., hostname=77927f992d0b,36033,1732288915809, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T15:23:17,040 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:17,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46900 deadline: 1732289057040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:17,041 DEBUG [Thread-1235 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4156 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac., hostname=77927f992d0b,36033,1732288915809, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T15:23:17,068 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:17,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46910 deadline: 1732289057066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:17,069 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/7710d4c63d5c428486b92ce26de7ca5f 2024-11-22T15:23:17,069 DEBUG [Thread-1239 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4178 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac., hostname=77927f992d0b,36033,1732288915809, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T15:23:17,075 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:17,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46916 deadline: 1732289057075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:17,076 DEBUG [Thread-1231 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4185 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at 
region=TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac., hostname=77927f992d0b,36033,1732288915809, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T15:23:17,096 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/44772458fc9e471e98f772eee58c8a40 is 50, key is test_row_1/C:col10/1732288995919/Put/seqid=0 2024-11-22T15:23:17,104 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:17,105 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-22T15:23:17,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:17,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:17,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:17,105 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:23:17,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:17,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:17,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742120_1296 (size=9707) 2024-11-22T15:23:17,244 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:17,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289057242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:17,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-22T15:23:17,257 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:17,258 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-22T15:23:17,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:17,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 
as already flushing 2024-11-22T15:23:17,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:17,259 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:17,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:17,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:17,411 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:17,411 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-22T15:23:17,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:17,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:17,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:17,412 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:17,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:17,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:17,542 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/44772458fc9e471e98f772eee58c8a40 2024-11-22T15:23:17,552 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/92f85a5613db44a18bef6cfa6d9ba96e as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/92f85a5613db44a18bef6cfa6d9ba96e 2024-11-22T15:23:17,560 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/92f85a5613db44a18bef6cfa6d9ba96e, entries=100, sequenceid=132, filesize=9.5 K 2024-11-22T15:23:17,563 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/7710d4c63d5c428486b92ce26de7ca5f as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/7710d4c63d5c428486b92ce26de7ca5f 2024-11-22T15:23:17,566 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:17,566 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-22T15:23:17,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:17,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:17,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:17,566 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:17,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:23:17,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
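
The entries above repeat one pattern: the master dispatches FlushRegionProcedure pid=76 to the region server, the server finds TestAcidGuarantees already mid-flush and reports "Unable to complete flush", and the master logs the remote procedure as failed and re-dispatches it until a later attempt succeeds (pid=76 finishes further down in this log). As a hedged illustration of how such a flush is requested in the first place, the sketch below uses the public Admin API; the connection setup and class name are assumptions for illustration, not taken from this run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();  // picks up hbase-site.xml on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Requests a flush of every region of the table. In this log the same
      // request shows up as FlushTableProcedure pid=75 with a per-region
      // FlushRegionProcedure pid=76 that the master keeps re-dispatching while
      // the region is still busy with an earlier flush.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
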
2024-11-22T15:23:17,569 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/7710d4c63d5c428486b92ce26de7ca5f, entries=100, sequenceid=132, filesize=9.5 K 2024-11-22T15:23:17,570 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/44772458fc9e471e98f772eee58c8a40 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/44772458fc9e471e98f772eee58c8a40 2024-11-22T15:23:17,579 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/44772458fc9e471e98f772eee58c8a40, entries=100, sequenceid=132, filesize=9.5 K 2024-11-22T15:23:17,580 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for db373da2a391c371f2b4fcae935e7eac in 1022ms, sequenceid=132, compaction requested=true 2024-11-22T15:23:17,580 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:17,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store db373da2a391c371f2b4fcae935e7eac:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:23:17,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:17,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store db373da2a391c371f2b4fcae935e7eac:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:23:17,580 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:23:17,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:23:17,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store db373da2a391c371f2b4fcae935e7eac:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:23:17,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-22T15:23:17,580 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:23:17,582 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33915 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:23:17,582 DEBUG 
[RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): db373da2a391c371f2b4fcae935e7eac/B is initiating minor compaction (all files) 2024-11-22T15:23:17,582 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of db373da2a391c371f2b4fcae935e7eac/B in TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:17,582 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/f1c8366eab854df4ba34bfff12b3356c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/f1869d1bea6f47caa0b84c4abb43a0a1, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/7710d4c63d5c428486b92ce26de7ca5f] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp, totalSize=33.1 K 2024-11-22T15:23:17,582 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting f1c8366eab854df4ba34bfff12b3356c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732288993969 2024-11-22T15:23:17,582 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33915 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:23:17,582 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): db373da2a391c371f2b4fcae935e7eac/A is initiating minor compaction (all files) 2024-11-22T15:23:17,582 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of db373da2a391c371f2b4fcae935e7eac/A in TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 
2024-11-22T15:23:17,583 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/b2041c95b2704383b2453f31ac3f1feb, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/97ad3f531c7746b6b1ebf507a7ca3c9b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/92f85a5613db44a18bef6cfa6d9ba96e] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp, totalSize=33.1 K 2024-11-22T15:23:17,583 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting f1869d1bea6f47caa0b84c4abb43a0a1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732288994716 2024-11-22T15:23:17,583 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 7710d4c63d5c428486b92ce26de7ca5f, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732288995919 2024-11-22T15:23:17,583 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting b2041c95b2704383b2453f31ac3f1feb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732288993969 2024-11-22T15:23:17,584 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 97ad3f531c7746b6b1ebf507a7ca3c9b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732288994716 2024-11-22T15:23:17,584 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 92f85a5613db44a18bef6cfa6d9ba96e, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732288995919 2024-11-22T15:23:17,596 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): db373da2a391c371f2b4fcae935e7eac#B#compaction#251 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:17,597 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/d1e80a1d4252476e9b8ece586587807b is 50, key is test_row_0/B:col10/1732288994724/Put/seqid=0 2024-11-22T15:23:17,610 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): db373da2a391c371f2b4fcae935e7eac#A#compaction#252 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:17,611 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/165ff4519f004aa2a51f14567d1cbd1c is 50, key is test_row_0/A:col10/1732288994724/Put/seqid=0 2024-11-22T15:23:17,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742122_1298 (size=12359) 2024-11-22T15:23:17,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742121_1297 (size=12359) 2024-11-22T15:23:17,718 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:17,719 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-22T15:23:17,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:17,720 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2837): Flushing db373da2a391c371f2b4fcae935e7eac 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-22T15:23:17,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=A 2024-11-22T15:23:17,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:17,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=B 2024-11-22T15:23:17,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:17,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=C 2024-11-22T15:23:17,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:17,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/9a0f8e85e0d041b5b177d010d2f2a35d is 50, key is test_row_0/A:col10/1732288996581/Put/seqid=0 2024-11-22T15:23:17,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 
2024-11-22T15:23:17,750 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:17,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on db373da2a391c371f2b4fcae935e7eac 2024-11-22T15:23:17,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742123_1299 (size=12151) 2024-11-22T15:23:17,795 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:17,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289057793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:17,898 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:17,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289057896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:18,041 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/165ff4519f004aa2a51f14567d1cbd1c as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/165ff4519f004aa2a51f14567d1cbd1c 2024-11-22T15:23:18,047 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in db373da2a391c371f2b4fcae935e7eac/A of db373da2a391c371f2b4fcae935e7eac into 165ff4519f004aa2a51f14567d1cbd1c(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
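
The WARN entries above ("Over memstore limit=512.0 K") are the region server pushing back on write-heavy clients while a flush is still draining the memstore. Below is a minimal sketch of client-side backoff, assuming the exception reaches the caller directly; with the stock client and default retry settings it is normally retried internally and may instead surface wrapped in a RetriesExhaustedWithDetailsException. The method name, attempt count, and pause schedule are illustrative assumptions.

import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public final class BackoffPut {
  // Retry a single put with exponential backoff when the region reports it is
  // too busy (memstore over its blocking limit).
  static void putWithBackoff(Table table, Put put, int maxAttempts)
      throws java.io.IOException, InterruptedException {
    long pauseMs = 100;
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException e) {
        // Caught directly here for clarity; real clients usually see this only
        // after the built-in retries are exhausted.
        if (attempt >= maxAttempts) {
          throw e;
        }
        Thread.sleep(pauseMs);
        pauseMs = Math.min(pauseMs * 2, 10_000);
      }
    }
  }
}
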
2024-11-22T15:23:18,047 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:18,047 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac., storeName=db373da2a391c371f2b4fcae935e7eac/A, priority=13, startTime=1732288997580; duration=0sec 2024-11-22T15:23:18,047 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:23:18,047 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: db373da2a391c371f2b4fcae935e7eac:A 2024-11-22T15:23:18,047 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:23:18,049 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33915 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:23:18,049 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): db373da2a391c371f2b4fcae935e7eac/C is initiating minor compaction (all files) 2024-11-22T15:23:18,049 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of db373da2a391c371f2b4fcae935e7eac/C in TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:18,049 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/35d73175da404be2a0c8484e4712f647, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/8a94163c11ce448495dd4e1837d99b28, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/44772458fc9e471e98f772eee58c8a40] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp, totalSize=33.1 K 2024-11-22T15:23:18,050 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 35d73175da404be2a0c8484e4712f647, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732288993969 2024-11-22T15:23:18,050 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8a94163c11ce448495dd4e1837d99b28, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732288994716 2024-11-22T15:23:18,051 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 44772458fc9e471e98f772eee58c8a40, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732288995919 2024-11-22T15:23:18,073 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/d1e80a1d4252476e9b8ece586587807b as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/d1e80a1d4252476e9b8ece586587807b 2024-11-22T15:23:18,082 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in db373da2a391c371f2b4fcae935e7eac/B of db373da2a391c371f2b4fcae935e7eac into d1e80a1d4252476e9b8ece586587807b(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:23:18,082 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:18,082 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac., storeName=db373da2a391c371f2b4fcae935e7eac/B, priority=13, startTime=1732288997580; duration=0sec 2024-11-22T15:23:18,082 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:18,082 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: db373da2a391c371f2b4fcae935e7eac:B 2024-11-22T15:23:18,084 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): db373da2a391c371f2b4fcae935e7eac#C#compaction#254 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:18,085 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/a307250c7dbc4baca40524ec2bc1f45b is 50, key is test_row_0/C:col10/1732288994724/Put/seqid=0 2024-11-22T15:23:18,104 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:18,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289058100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:18,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742124_1300 (size=12359) 2024-11-22T15:23:18,156 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=153 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/9a0f8e85e0d041b5b177d010d2f2a35d 2024-11-22T15:23:18,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/bb25e3f90cf14bedbf7aa3f15db28bb8 is 50, key is test_row_0/B:col10/1732288996581/Put/seqid=0 2024-11-22T15:23:18,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742125_1301 (size=12151) 2024-11-22T15:23:18,233 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=153 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/bb25e3f90cf14bedbf7aa3f15db28bb8 2024-11-22T15:23:18,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/fe8c30f83f034de882ba003915e7d53c is 50, key is test_row_0/C:col10/1732288996581/Put/seqid=0 2024-11-22T15:23:18,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742126_1302 (size=12151) 2024-11-22T15:23:18,304 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=153 (bloomFilter=true), 
to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/fe8c30f83f034de882ba003915e7d53c 2024-11-22T15:23:18,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/9a0f8e85e0d041b5b177d010d2f2a35d as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/9a0f8e85e0d041b5b177d010d2f2a35d 2024-11-22T15:23:18,318 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/9a0f8e85e0d041b5b177d010d2f2a35d, entries=150, sequenceid=153, filesize=11.9 K 2024-11-22T15:23:18,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/bb25e3f90cf14bedbf7aa3f15db28bb8 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/bb25e3f90cf14bedbf7aa3f15db28bb8 2024-11-22T15:23:18,326 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/bb25e3f90cf14bedbf7aa3f15db28bb8, entries=150, sequenceid=153, filesize=11.9 K 2024-11-22T15:23:18,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/fe8c30f83f034de882ba003915e7d53c as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/fe8c30f83f034de882ba003915e7d53c 2024-11-22T15:23:18,336 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/fe8c30f83f034de882ba003915e7d53c, entries=150, sequenceid=153, filesize=11.9 K 2024-11-22T15:23:18,336 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for db373da2a391c371f2b4fcae935e7eac in 617ms, sequenceid=153, compaction requested=false 2024-11-22T15:23:18,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2538): Flush status journal for 
db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:18,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:18,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=76 2024-11-22T15:23:18,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=76 2024-11-22T15:23:18,339 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-11-22T15:23:18,339 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6940 sec 2024-11-22T15:23:18,340 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees in 1.6970 sec 2024-11-22T15:23:18,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on db373da2a391c371f2b4fcae935e7eac 2024-11-22T15:23:18,408 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing db373da2a391c371f2b4fcae935e7eac 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-22T15:23:18,408 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=A 2024-11-22T15:23:18,408 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:18,408 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=B 2024-11-22T15:23:18,408 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:18,408 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=C 2024-11-22T15:23:18,408 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:18,428 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/748ed977858647fd93373f6dcc2bed46 is 50, key is test_row_0/A:col10/1732288997777/Put/seqid=0 2024-11-22T15:23:18,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742127_1303 (size=14541) 2024-11-22T15:23:18,461 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/748ed977858647fd93373f6dcc2bed46 2024-11-22T15:23:18,472 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/379074cd271d42ab9b77650eca1ef142 is 50, key is test_row_0/B:col10/1732288997777/Put/seqid=0 2024-11-22T15:23:18,476 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:18,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289058473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:18,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742128_1304 (size=12151) 2024-11-22T15:23:18,497 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/379074cd271d42ab9b77650eca1ef142 2024-11-22T15:23:18,507 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/4af4a91e2c0448abb3a342617a5e0ea6 is 50, key is test_row_0/C:col10/1732288997777/Put/seqid=0 2024-11-22T15:23:18,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742129_1305 (size=12151) 2024-11-22T15:23:18,547 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=171 (bloomFilter=true), 
to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/4af4a91e2c0448abb3a342617a5e0ea6 2024-11-22T15:23:18,549 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/a307250c7dbc4baca40524ec2bc1f45b as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/a307250c7dbc4baca40524ec2bc1f45b 2024-11-22T15:23:18,556 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/748ed977858647fd93373f6dcc2bed46 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/748ed977858647fd93373f6dcc2bed46 2024-11-22T15:23:18,561 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in db373da2a391c371f2b4fcae935e7eac/C of db373da2a391c371f2b4fcae935e7eac into a307250c7dbc4baca40524ec2bc1f45b(size=12.1 K), total size for store is 23.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:23:18,561 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:18,561 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac., storeName=db373da2a391c371f2b4fcae935e7eac/C, priority=13, startTime=1732288997580; duration=0sec 2024-11-22T15:23:18,561 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:18,561 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: db373da2a391c371f2b4fcae935e7eac:C 2024-11-22T15:23:18,566 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/748ed977858647fd93373f6dcc2bed46, entries=200, sequenceid=171, filesize=14.2 K 2024-11-22T15:23:18,568 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/379074cd271d42ab9b77650eca1ef142 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/379074cd271d42ab9b77650eca1ef142 2024-11-22T15:23:18,574 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/379074cd271d42ab9b77650eca1ef142, entries=150, sequenceid=171, 
filesize=11.9 K 2024-11-22T15:23:18,575 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/4af4a91e2c0448abb3a342617a5e0ea6 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/4af4a91e2c0448abb3a342617a5e0ea6 2024-11-22T15:23:18,579 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:18,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289058577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:18,582 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/4af4a91e2c0448abb3a342617a5e0ea6, entries=150, sequenceid=171, filesize=11.9 K 2024-11-22T15:23:18,583 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for db373da2a391c371f2b4fcae935e7eac in 175ms, sequenceid=171, compaction requested=true 2024-11-22T15:23:18,583 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:18,583 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store db373da2a391c371f2b4fcae935e7eac:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:23:18,583 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 
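
The 512.0 K figure quoted in the RegionTooBusyException entries is the per-region memstore blocking limit, which is roughly hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The sketch below shows one way a test configuration could arrive at that number; the 128 KB flush size and 4x multiplier split is an assumption for illustration, not read from this run's site configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreLimits {
  public static Configuration smallRegionLimits() {
    Configuration conf = HBaseConfiguration.create();
    // Blocking limit seen in the log (~512 K) would be flush size times the
    // block multiplier; this particular 128 KB x 4 split is an assumption.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}
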
2024-11-22T15:23:18,583 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:23:18,583 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store db373da2a391c371f2b4fcae935e7eac:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:23:18,583 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:18,583 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:23:18,583 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store db373da2a391c371f2b4fcae935e7eac:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:23:18,583 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:23:18,591 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39051 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:23:18,591 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36661 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:23:18,591 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): db373da2a391c371f2b4fcae935e7eac/B is initiating minor compaction (all files) 2024-11-22T15:23:18,591 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): db373da2a391c371f2b4fcae935e7eac/A is initiating minor compaction (all files) 2024-11-22T15:23:18,591 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of db373da2a391c371f2b4fcae935e7eac/B in TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:18,591 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of db373da2a391c371f2b4fcae935e7eac/A in TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 
2024-11-22T15:23:18,591 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/d1e80a1d4252476e9b8ece586587807b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/bb25e3f90cf14bedbf7aa3f15db28bb8, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/379074cd271d42ab9b77650eca1ef142] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp, totalSize=35.8 K 2024-11-22T15:23:18,591 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/165ff4519f004aa2a51f14567d1cbd1c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/9a0f8e85e0d041b5b177d010d2f2a35d, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/748ed977858647fd93373f6dcc2bed46] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp, totalSize=38.1 K 2024-11-22T15:23:18,591 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 165ff4519f004aa2a51f14567d1cbd1c, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732288994724 2024-11-22T15:23:18,591 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting d1e80a1d4252476e9b8ece586587807b, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732288994724 2024-11-22T15:23:18,591 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting bb25e3f90cf14bedbf7aa3f15db28bb8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1732288996581 2024-11-22T15:23:18,591 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9a0f8e85e0d041b5b177d010d2f2a35d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1732288996581 2024-11-22T15:23:18,592 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 748ed977858647fd93373f6dcc2bed46, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1732288997773 2024-11-22T15:23:18,592 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 379074cd271d42ab9b77650eca1ef142, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1732288997777 2024-11-22T15:23:18,607 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): db373da2a391c371f2b4fcae935e7eac#B#compaction#260 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:18,607 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/0e4701a5f95e4881a4cccc519f2703a1 is 50, key is test_row_0/B:col10/1732288997777/Put/seqid=0 2024-11-22T15:23:18,621 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): db373da2a391c371f2b4fcae935e7eac#A#compaction#261 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:18,623 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/5b72644bed8e4b9d9a4e44fb1c7cfea7 is 50, key is test_row_0/A:col10/1732288997777/Put/seqid=0 2024-11-22T15:23:18,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742130_1306 (size=12561) 2024-11-22T15:23:18,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742131_1307 (size=12561) 2024-11-22T15:23:18,683 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/5b72644bed8e4b9d9a4e44fb1c7cfea7 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/5b72644bed8e4b9d9a4e44fb1c7cfea7 2024-11-22T15:23:18,689 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in db373da2a391c371f2b4fcae935e7eac/A of db373da2a391c371f2b4fcae935e7eac into 5b72644bed8e4b9d9a4e44fb1c7cfea7(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
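The PressureAwareThroughputController lines above report an average throughput of 6.55 MB/second against a total limit of 50.00 MB/second, with zero sleeps because the compactions stayed well under the limit. A rough sketch of that throttling arithmetic is below, assuming each active operation gets an even share of the limit and sleeps whenever it runs ahead of that share; the control-interval and pressure-adjustment details of the real controller are omitted, and the class name is illustrative.

// Hedged sketch of the throttling arithmetic behind the "average throughput ... total limit"
// lines: each writer gets an even share of the total limit and sleeps when it runs ahead.
public class ThroughputThrottleSketch {

    private final double totalLimitBytesPerSec;
    private long bytesWritten;
    private final long startNanos = System.nanoTime();

    ThroughputThrottleSketch(double totalLimitBytesPerSec) {
        this.totalLimitBytesPerSec = totalLimitBytesPerSec;
    }

    // Returns how many milliseconds the caller should sleep after writing `bytes`,
    // so that bytesWritten / elapsed stays at or below its share of the limit.
    long control(long bytes, int activeOperations) {
        bytesWritten += bytes;
        double share = totalLimitBytesPerSec / Math.max(1, activeOperations);
        double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
        double neededSec = bytesWritten / share;     // time the writes *should* have taken
        double sleepSec = neededSec - elapsedSec;
        return sleepSec > 0 ? (long) (sleepSec * 1000) : 0;
    }

    public static void main(String[] args) {
        // 50 MB/s total limit, as in the log; a 12.5 KB block written by a single compaction.
        ThroughputThrottleSketch throttle = new ThroughputThrottleSketch(50.0 * 1024 * 1024);
        System.out.println("sleep ms = " + throttle.control(12_561, 1));
    }
}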
2024-11-22T15:23:18,689 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:18,689 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac., storeName=db373da2a391c371f2b4fcae935e7eac/A, priority=13, startTime=1732288998583; duration=0sec 2024-11-22T15:23:18,689 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:23:18,689 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: db373da2a391c371f2b4fcae935e7eac:A 2024-11-22T15:23:18,689 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:23:18,690 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36661 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:23:18,690 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): db373da2a391c371f2b4fcae935e7eac/C is initiating minor compaction (all files) 2024-11-22T15:23:18,691 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of db373da2a391c371f2b4fcae935e7eac/C in TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:18,691 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/a307250c7dbc4baca40524ec2bc1f45b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/fe8c30f83f034de882ba003915e7d53c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/4af4a91e2c0448abb3a342617a5e0ea6] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp, totalSize=35.8 K 2024-11-22T15:23:18,691 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting a307250c7dbc4baca40524ec2bc1f45b, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732288994724 2024-11-22T15:23:18,691 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting fe8c30f83f034de882ba003915e7d53c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1732288996581 2024-11-22T15:23:18,693 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4af4a91e2c0448abb3a342617a5e0ea6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1732288997777 2024-11-22T15:23:18,700 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): db373da2a391c371f2b4fcae935e7eac#C#compaction#262 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:18,701 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/102b1e5faca34e3888644ac750912cc9 is 50, key is test_row_0/C:col10/1732288997777/Put/seqid=0 2024-11-22T15:23:18,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742132_1308 (size=12561) 2024-11-22T15:23:18,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-22T15:23:18,748 INFO [Thread-1241 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed 2024-11-22T15:23:18,750 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:23:18,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees 2024-11-22T15:23:18,752 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:23:18,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-22T15:23:18,752 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:23:18,753 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:23:18,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on db373da2a391c371f2b4fcae935e7eac 2024-11-22T15:23:18,784 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing db373da2a391c371f2b4fcae935e7eac 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-22T15:23:18,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=A 2024-11-22T15:23:18,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:18,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=B 2024-11-22T15:23:18,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:18,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
db373da2a391c371f2b4fcae935e7eac, store=C 2024-11-22T15:23:18,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:18,807 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/6522ba52c9104e9fb3aa52f04406841c is 50, key is test_row_0/A:col10/1732288998782/Put/seqid=0 2024-11-22T15:23:18,849 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:18,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289058845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:18,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-22T15:23:18,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742133_1309 (size=12151) 2024-11-22T15:23:18,905 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:18,906 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-22T15:23:18,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 
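The RegionTooBusyException above is the region server refusing new Mutate calls while the region's memstore is over its 512 K blocking limit (a deliberately small limit in this test), so that the flush already in progress can drain it. The stand-alone sketch below illustrates that admission check; apart from the 512 K limit taken from the log, the class, exception and numbers are illustrative, not the actual HRegion.checkResources code.

// Hedged sketch of the resource check behind the RegionTooBusyException lines: writes are
// rejected while the region's memstore is above its blocking limit so the flush can catch up.
public class MemStoreLimitSketch {

    static class RegionTooBusy extends RuntimeException {
        RegionTooBusy(String msg) { super(msg); }
    }

    private final long blockingLimitBytes;
    private long memstoreSizeBytes;

    MemStoreLimitSketch(long blockingLimitBytes) {
        this.blockingLimitBytes = blockingLimitBytes;
    }

    void put(long mutationBytes) {
        if (memstoreSizeBytes > blockingLimitBytes) {
            // The client sees this as a retriable exception and backs off.
            throw new RegionTooBusy("Over memstore limit=" + blockingLimitBytes + " bytes");
        }
        memstoreSizeBytes += mutationBytes;
    }

    void flushed(long flushedBytes) {
        memstoreSizeBytes -= flushedBytes;
    }

    public static void main(String[] args) {
        MemStoreLimitSketch region = new MemStoreLimitSketch(512 * 1024);
        for (int i = 0; i < 200; i++) {
            try {
                region.put(4_700);                  // ~4.7 K mutations, as in the log
            } catch (RegionTooBusy e) {
                System.out.println("rejected at put #" + i + ": " + e.getMessage());
                region.flushed(120 * 1024);         // a flush frees space, writes resume
            }
        }
    }
}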
2024-11-22T15:23:18,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:18,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:18,906 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:18,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
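The pid=78 failure above repeats throughout the rest of this section: FlushRegionCallable finds the region "already flushing" (the MemStoreFlusher flush started at 15:23:18,784), throws IOException, and the master re-dispatches the procedure until the callable can run. A simplified sketch of that retry interplay follows; the names, timings and single-flag model are illustrative, not the actual FlushRegionCallable/ProcedureExecutor code.

import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;

// Hedged sketch: the server-side callable refuses to start while another flush is in
// progress, and the coordinator simply keeps re-sending the work until it succeeds.
public class FlushRetrySketch {

    static final AtomicBoolean alreadyFlushing = new AtomicBoolean(true);

    static void flushRegionCallable() throws IOException {
        if (alreadyFlushing.get()) {
            throw new IOException("Unable to complete flush: region already flushing");
        }
        System.out.println("flush executed");
    }

    public static void main(String[] args) throws InterruptedException {
        // Simulate the in-progress MemStoreFlusher flush finishing after ~1 second.
        new Thread(() -> {
            try { Thread.sleep(1000); } catch (InterruptedException ignored) { }
            alreadyFlushing.set(false);
        }).start();

        int attempt = 0;
        while (true) {
            attempt++;
            try {
                flushRegionCallable();
                break;                               // procedure completes
            } catch (IOException e) {
                System.out.println("attempt " + attempt + " failed: " + e.getMessage());
                Thread.sleep(150);                   // coordinator backs off and re-dispatches
            }
        }
    }
}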
2024-11-22T15:23:18,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:18,953 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:18,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289058952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:19,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-22T15:23:19,058 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:19,059 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-22T15:23:19,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:19,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:19,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:19,059 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:23:19,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:19,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:19,061 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/0e4701a5f95e4881a4cccc519f2703a1 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/0e4701a5f95e4881a4cccc519f2703a1 2024-11-22T15:23:19,067 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in db373da2a391c371f2b4fcae935e7eac/B of db373da2a391c371f2b4fcae935e7eac into 0e4701a5f95e4881a4cccc519f2703a1(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:23:19,067 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:19,067 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac., storeName=db373da2a391c371f2b4fcae935e7eac/B, priority=13, startTime=1732288998583; duration=0sec 2024-11-22T15:23:19,067 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:19,067 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: db373da2a391c371f2b4fcae935e7eac:B 2024-11-22T15:23:19,143 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/102b1e5faca34e3888644ac750912cc9 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/102b1e5faca34e3888644ac750912cc9 2024-11-22T15:23:19,149 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in db373da2a391c371f2b4fcae935e7eac/C of db373da2a391c371f2b4fcae935e7eac into 102b1e5faca34e3888644ac750912cc9(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
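The HRegionFileSystem "Committing .tmp/... as ..." lines show the two-phase publish used by both compactions and flushes: the new HFile is fully written under the region's .tmp directory and only then moved into the store directory, so readers never observe a half-written file; the three input files are dropped only after the commit. The sketch below reproduces the pattern with local java.nio paths standing in for the HDFS calls; the directory layout and file name are illustrative.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

// Hedged sketch of the ".tmp then commit" pattern, using a local filesystem as a stand-in.
public class TmpCommitSketch {
    public static void main(String[] args) throws IOException {
        Path storeDir = Files.createTempDirectory("store-A");
        Path tmpDir = Files.createDirectories(storeDir.resolve(".tmp"));

        // 1. Write the new file under .tmp while the old files stay live and readable.
        Path tmpFile = Files.writeString(tmpDir.resolve("5b72644bed8e4b9d9a4e44fb1c7cfea7"),
            "compacted contents");

        // 2. Commit: an atomic move publishes the file under the store directory.
        Path committed = Files.move(tmpFile, storeDir.resolve(tmpFile.getFileName()),
            StandardCopyOption.ATOMIC_MOVE);

        // 3. Only after the commit would the compacted-away input files be removed.
        System.out.println("committed " + committed);
    }
}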
2024-11-22T15:23:19,149 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:19,149 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac., storeName=db373da2a391c371f2b4fcae935e7eac/C, priority=13, startTime=1732288998583; duration=0sec 2024-11-22T15:23:19,149 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:19,149 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: db373da2a391c371f2b4fcae935e7eac:C 2024-11-22T15:23:19,158 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:19,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289059157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:19,212 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:19,214 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-22T15:23:19,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 
2024-11-22T15:23:19,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:19,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:19,214 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:19,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:23:19,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:23:19,257 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/6522ba52c9104e9fb3aa52f04406841c 2024-11-22T15:23:19,280 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/124fcd6eda764915b5a23d6795e6afe2 is 50, key is test_row_0/B:col10/1732288998782/Put/seqid=0 2024-11-22T15:23:19,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742134_1310 (size=12151) 2024-11-22T15:23:19,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-22T15:23:19,366 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:19,367 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-22T15:23:19,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:19,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:19,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:19,367 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:23:19,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:19,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:19,461 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:19,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289059459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:19,520 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:19,520 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-22T15:23:19,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:19,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:19,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 
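On the client side the RegionTooBusyException above is retriable: the caller backs off and re-sends the Mutate until it succeeds or its per-operation deadline (the "deadline=..." value logged with each rejected call) expires. A hedged sketch of such a backoff loop follows; the pause schedule, interface and method names are illustrative assumptions, not the actual HBase client retry code.

// Hedged sketch of client-side backoff on a busy region: retry until success or deadline.
public class ClientBackoffSketch {

    interface Mutation { void apply() throws Exception; }

    static void runWithRetries(Mutation m, long deadlineMillis) throws Exception {
        long pause = 100;                                  // initial pause in ms
        while (true) {
            try {
                m.apply();
                return;
            } catch (Exception regionTooBusy) {
                if (System.currentTimeMillis() + pause > deadlineMillis) {
                    throw regionTooBusy;                   // out of time, surface the failure
                }
                Thread.sleep(pause);
                pause = Math.min(pause * 2, 10_000);       // exponential backoff, capped
            }
        }
    }

    public static void main(String[] args) throws Exception {
        long deadline = System.currentTimeMillis() + 5_000;
        int[] attempts = {0};
        runWithRetries(() -> {
            if (++attempts[0] < 3) throw new Exception("Over memstore limit=512.0 K");
            System.out.println("mutation applied on attempt " + attempts[0]);
        }, deadline);
    }
}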
2024-11-22T15:23:19,520 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:19,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:19,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:19,672 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:19,674 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-22T15:23:19,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:19,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:19,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:19,675 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:19,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:19,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:19,711 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/124fcd6eda764915b5a23d6795e6afe2 2024-11-22T15:23:19,724 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/4dec7fb5eba747408dc1e1da95f786b6 is 50, key is test_row_0/C:col10/1732288998782/Put/seqid=0 2024-11-22T15:23:19,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742135_1311 (size=12151) 2024-11-22T15:23:19,744 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/4dec7fb5eba747408dc1e1da95f786b6 2024-11-22T15:23:19,752 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/6522ba52c9104e9fb3aa52f04406841c as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/6522ba52c9104e9fb3aa52f04406841c 2024-11-22T15:23:19,756 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/6522ba52c9104e9fb3aa52f04406841c, entries=150, sequenceid=194, filesize=11.9 K 2024-11-22T15:23:19,757 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/124fcd6eda764915b5a23d6795e6afe2 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/124fcd6eda764915b5a23d6795e6afe2 2024-11-22T15:23:19,762 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/124fcd6eda764915b5a23d6795e6afe2, entries=150, sequenceid=194, filesize=11.9 K 2024-11-22T15:23:19,765 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/4dec7fb5eba747408dc1e1da95f786b6 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/4dec7fb5eba747408dc1e1da95f786b6 2024-11-22T15:23:19,773 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/4dec7fb5eba747408dc1e1da95f786b6, entries=150, sequenceid=194, filesize=11.9 K 2024-11-22T15:23:19,774 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for db373da2a391c371f2b4fcae935e7eac in 991ms, sequenceid=194, compaction requested=false 2024-11-22T15:23:19,774 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:19,827 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:19,827 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-22T15:23:19,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 
2024-11-22T15:23:19,827 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2837): Flushing db373da2a391c371f2b4fcae935e7eac 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-22T15:23:19,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=A 2024-11-22T15:23:19,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:19,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=B 2024-11-22T15:23:19,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:19,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=C 2024-11-22T15:23:19,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:19,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/fb2a2ed675fd4ab0941fc8449ed3728a is 50, key is test_row_0/A:col10/1732288998839/Put/seqid=0 2024-11-22T15:23:19,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-22T15:23:19,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742136_1312 (size=12151) 2024-11-22T15:23:19,869 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/fb2a2ed675fd4ab0941fc8449ed3728a 2024-11-22T15:23:19,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/7e54ab0930d54201818967720bb8ae79 is 50, key is test_row_0/B:col10/1732288998839/Put/seqid=0 2024-11-22T15:23:19,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742137_1313 (size=12151) 2024-11-22T15:23:19,936 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=211 (bloomFilter=true), 
to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/7e54ab0930d54201818967720bb8ae79 2024-11-22T15:23:19,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/b3ea63ea34b44498a9734a1e5c317088 is 50, key is test_row_0/C:col10/1732288998839/Put/seqid=0 2024-11-22T15:23:19,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on db373da2a391c371f2b4fcae935e7eac 2024-11-22T15:23:19,968 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:19,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742138_1314 (size=12151) 2024-11-22T15:23:20,030 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:20,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289060028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:20,134 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:20,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289060131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:20,336 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:20,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289060335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:20,384 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/b3ea63ea34b44498a9734a1e5c317088 2024-11-22T15:23:20,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/fb2a2ed675fd4ab0941fc8449ed3728a as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/fb2a2ed675fd4ab0941fc8449ed3728a 2024-11-22T15:23:20,395 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/fb2a2ed675fd4ab0941fc8449ed3728a, entries=150, sequenceid=211, filesize=11.9 K 2024-11-22T15:23:20,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/7e54ab0930d54201818967720bb8ae79 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/7e54ab0930d54201818967720bb8ae79 2024-11-22T15:23:20,403 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/7e54ab0930d54201818967720bb8ae79, entries=150, sequenceid=211, filesize=11.9 K 2024-11-22T15:23:20,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/b3ea63ea34b44498a9734a1e5c317088 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/b3ea63ea34b44498a9734a1e5c317088 2024-11-22T15:23:20,412 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/b3ea63ea34b44498a9734a1e5c317088, entries=150, sequenceid=211, filesize=11.9 K 2024-11-22T15:23:20,415 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for db373da2a391c371f2b4fcae935e7eac in 588ms, sequenceid=211, compaction requested=true 2024-11-22T15:23:20,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2538): Flush status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:20,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:20,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=78 2024-11-22T15:23:20,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=78 2024-11-22T15:23:20,420 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-11-22T15:23:20,420 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6650 sec 2024-11-22T15:23:20,423 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees in 1.6710 sec 2024-11-22T15:23:20,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on db373da2a391c371f2b4fcae935e7eac 2024-11-22T15:23:20,639 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing db373da2a391c371f2b4fcae935e7eac 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-22T15:23:20,640 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=A 2024-11-22T15:23:20,640 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:20,640 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=B 2024-11-22T15:23:20,640 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:20,640 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
db373da2a391c371f2b4fcae935e7eac, store=C 2024-11-22T15:23:20,640 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:20,652 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/83734b1362474e8694141671d4028866 is 50, key is test_row_0/A:col10/1732289000638/Put/seqid=0 2024-11-22T15:23:20,676 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:20,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289060675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:20,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742139_1315 (size=14541) 2024-11-22T15:23:20,780 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:20,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289060777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:20,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-22T15:23:20,856 INFO [Thread-1241 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 77 completed 2024-11-22T15:23:20,859 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:23:20,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees 2024-11-22T15:23:20,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-22T15:23:20,861 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:23:20,862 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:23:20,862 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:23:20,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-22T15:23:20,984 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:20,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289060983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:21,014 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:21,014 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-22T15:23:21,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:21,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:21,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:21,015 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:23:21,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:21,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:21,044 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:21,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46934 deadline: 1732289061042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:21,045 DEBUG [Thread-1237 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8159 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac., hostname=77927f992d0b,36033,1732288915809, seqNum=2, see https://s.apache.org/timeout, 
exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at 
org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T15:23:21,075 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:21,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46900 deadline: 1732289061073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:21,077 DEBUG [Thread-1235 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8192 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac., hostname=77927f992d0b,36033,1732288915809, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T15:23:21,086 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:21,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46916 deadline: 1732289061084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:21,086 DEBUG [Thread-1231 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8195 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at 
org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac., hostname=77927f992d0b,36033,1732288915809, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T15:23:21,089 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:21,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46910 deadline: 1732289061088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:21,090 DEBUG [Thread-1239 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8199 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac., hostname=77927f992d0b,36033,1732288915809, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T15:23:21,105 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/83734b1362474e8694141671d4028866 2024-11-22T15:23:21,113 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/d13e6fdefac44addbd3e9c87e17aabf5 is 
50, key is test_row_0/B:col10/1732289000638/Put/seqid=0 2024-11-22T15:23:21,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742140_1316 (size=12151) 2024-11-22T15:23:21,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-22T15:23:21,167 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:21,168 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-22T15:23:21,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:21,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:21,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:21,168 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:21,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:21,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:23:21,172 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/d13e6fdefac44addbd3e9c87e17aabf5 2024-11-22T15:23:21,189 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/bb16cf7a24614bc69a1bb674600faa6f is 50, key is test_row_0/C:col10/1732289000638/Put/seqid=0 2024-11-22T15:23:21,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742141_1317 (size=12151) 2024-11-22T15:23:21,232 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/bb16cf7a24614bc69a1bb674600faa6f 2024-11-22T15:23:21,237 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/83734b1362474e8694141671d4028866 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/83734b1362474e8694141671d4028866 2024-11-22T15:23:21,241 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/83734b1362474e8694141671d4028866, entries=200, sequenceid=233, filesize=14.2 K 2024-11-22T15:23:21,242 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/d13e6fdefac44addbd3e9c87e17aabf5 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/d13e6fdefac44addbd3e9c87e17aabf5 2024-11-22T15:23:21,246 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/d13e6fdefac44addbd3e9c87e17aabf5, entries=150, sequenceid=233, filesize=11.9 K 2024-11-22T15:23:21,248 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/bb16cf7a24614bc69a1bb674600faa6f as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/bb16cf7a24614bc69a1bb674600faa6f 2024-11-22T15:23:21,251 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/bb16cf7a24614bc69a1bb674600faa6f, entries=150, sequenceid=233, filesize=11.9 K 2024-11-22T15:23:21,252 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for db373da2a391c371f2b4fcae935e7eac in 613ms, sequenceid=233, compaction requested=true 2024-11-22T15:23:21,252 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:21,253 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T15:23:21,254 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 51404 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T15:23:21,254 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): db373da2a391c371f2b4fcae935e7eac/A is initiating minor compaction (all files) 2024-11-22T15:23:21,254 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of db373da2a391c371f2b4fcae935e7eac/A in TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:21,254 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/5b72644bed8e4b9d9a4e44fb1c7cfea7, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/6522ba52c9104e9fb3aa52f04406841c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/fb2a2ed675fd4ab0941fc8449ed3728a, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/83734b1362474e8694141671d4028866] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp, totalSize=50.2 K 2024-11-22T15:23:21,255 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5b72644bed8e4b9d9a4e44fb1c7cfea7, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1732288997777 2024-11-22T15:23:21,255 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6522ba52c9104e9fb3aa52f04406841c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1732288998448 2024-11-22T15:23:21,255 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting fb2a2ed675fd4ab0941fc8449ed3728a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732288998813 2024-11-22T15:23:21,256 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 83734b1362474e8694141671d4028866, keycount=200, bloomtype=ROW, size=14.2 K, 
encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1732289000007 2024-11-22T15:23:21,262 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store db373da2a391c371f2b4fcae935e7eac:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:23:21,262 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:21,262 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T15:23:21,263 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T15:23:21,263 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): db373da2a391c371f2b4fcae935e7eac/B is initiating minor compaction (all files) 2024-11-22T15:23:21,263 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of db373da2a391c371f2b4fcae935e7eac/B in TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:21,264 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/0e4701a5f95e4881a4cccc519f2703a1, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/124fcd6eda764915b5a23d6795e6afe2, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/7e54ab0930d54201818967720bb8ae79, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/d13e6fdefac44addbd3e9c87e17aabf5] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp, totalSize=47.9 K 2024-11-22T15:23:21,264 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 0e4701a5f95e4881a4cccc519f2703a1, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1732288997777 2024-11-22T15:23:21,264 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 124fcd6eda764915b5a23d6795e6afe2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1732288998448 2024-11-22T15:23:21,264 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 7e54ab0930d54201818967720bb8ae79, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732288998813 2024-11-22T15:23:21,265 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting d13e6fdefac44addbd3e9c87e17aabf5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1732289000007 2024-11-22T15:23:21,270 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 
{}] throttle.PressureAwareThroughputController(145): db373da2a391c371f2b4fcae935e7eac#A#compaction#272 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:21,270 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/803d42be51c141dbb2954bac00793ac0 is 50, key is test_row_0/A:col10/1732289000638/Put/seqid=0 2024-11-22T15:23:21,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store db373da2a391c371f2b4fcae935e7eac:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:23:21,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:21,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store db373da2a391c371f2b4fcae935e7eac:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:23:21,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:23:21,281 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): db373da2a391c371f2b4fcae935e7eac#B#compaction#273 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:21,282 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/29f5540501f5404fbd45a11d16aba0d9 is 50, key is test_row_0/B:col10/1732289000638/Put/seqid=0 2024-11-22T15:23:21,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on db373da2a391c371f2b4fcae935e7eac 2024-11-22T15:23:21,289 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing db373da2a391c371f2b4fcae935e7eac 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-22T15:23:21,289 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=A 2024-11-22T15:23:21,289 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:21,289 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=B 2024-11-22T15:23:21,289 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:21,289 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=C 2024-11-22T15:23:21,290 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:21,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 
is added to blk_1073742143_1319 (size=12697) 2024-11-22T15:23:21,304 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/c70f798a71b44c4d92253e8af774ea4a is 50, key is test_row_0/A:col10/1732289000660/Put/seqid=0 2024-11-22T15:23:21,309 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/29f5540501f5404fbd45a11d16aba0d9 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/29f5540501f5404fbd45a11d16aba0d9 2024-11-22T15:23:21,314 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in db373da2a391c371f2b4fcae935e7eac/B of db373da2a391c371f2b4fcae935e7eac into 29f5540501f5404fbd45a11d16aba0d9(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:23:21,314 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:21,314 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac., storeName=db373da2a391c371f2b4fcae935e7eac/B, priority=12, startTime=1732289001262; duration=0sec 2024-11-22T15:23:21,314 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:23:21,314 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: db373da2a391c371f2b4fcae935e7eac:B 2024-11-22T15:23:21,315 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T15:23:21,316 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T15:23:21,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742142_1318 (size=12697) 2024-11-22T15:23:21,320 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): db373da2a391c371f2b4fcae935e7eac/C is initiating minor compaction (all files) 2024-11-22T15:23:21,320 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:21,320 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of db373da2a391c371f2b4fcae935e7eac/C in TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 
2024-11-22T15:23:21,320 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/102b1e5faca34e3888644ac750912cc9, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/4dec7fb5eba747408dc1e1da95f786b6, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/b3ea63ea34b44498a9734a1e5c317088, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/bb16cf7a24614bc69a1bb674600faa6f] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp, totalSize=47.9 K 2024-11-22T15:23:21,320 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-22T15:23:21,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:21,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:21,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:21,321 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:23:21,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:21,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:21,324 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 102b1e5faca34e3888644ac750912cc9, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1732288997777 2024-11-22T15:23:21,325 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 4dec7fb5eba747408dc1e1da95f786b6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1732288998448 2024-11-22T15:23:21,326 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting b3ea63ea34b44498a9734a1e5c317088, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732288998813 2024-11-22T15:23:21,328 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting bb16cf7a24614bc69a1bb674600faa6f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1732289000007 2024-11-22T15:23:21,328 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/803d42be51c141dbb2954bac00793ac0 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/803d42be51c141dbb2954bac00793ac0 2024-11-22T15:23:21,333 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in db373da2a391c371f2b4fcae935e7eac/A of db373da2a391c371f2b4fcae935e7eac into 803d42be51c141dbb2954bac00793ac0(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:23:21,333 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:21,333 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac., storeName=db373da2a391c371f2b4fcae935e7eac/A, priority=12, startTime=1732289001252; duration=0sec 2024-11-22T15:23:21,333 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:21,333 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: db373da2a391c371f2b4fcae935e7eac:A 2024-11-22T15:23:21,368 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): db373da2a391c371f2b4fcae935e7eac#C#compaction#275 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:21,368 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/f9e24c6436634b04a398ad0c7e3e7c13 is 50, key is test_row_0/C:col10/1732289000638/Put/seqid=0 2024-11-22T15:23:21,375 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:21,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 259 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289061374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:21,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742144_1320 (size=14541) 2024-11-22T15:23:21,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742145_1321 (size=12697) 2024-11-22T15:23:21,437 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/f9e24c6436634b04a398ad0c7e3e7c13 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/f9e24c6436634b04a398ad0c7e3e7c13 2024-11-22T15:23:21,448 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in db373da2a391c371f2b4fcae935e7eac/C of db373da2a391c371f2b4fcae935e7eac into f9e24c6436634b04a398ad0c7e3e7c13(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:23:21,448 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:21,448 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac., storeName=db373da2a391c371f2b4fcae935e7eac/C, priority=12, startTime=1732289001271; duration=0sec 2024-11-22T15:23:21,448 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:21,448 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: db373da2a391c371f2b4fcae935e7eac:C 2024-11-22T15:23:21,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-22T15:23:21,473 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:21,474 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-22T15:23:21,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:21,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:21,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:21,475 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:23:21,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:21,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:21,479 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:21,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289061476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:21,627 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:21,627 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-22T15:23:21,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:21,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:21,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 
2024-11-22T15:23:21,628 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:21,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:21,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:21,683 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:21,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 263 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289061682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:21,779 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:21,780 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-22T15:23:21,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:21,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:21,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:21,780 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:23:21,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:21,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:21,794 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/c70f798a71b44c4d92253e8af774ea4a 2024-11-22T15:23:21,816 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/d338f9e2f4b8458bafee9ed934a5cc05 is 50, key is test_row_0/B:col10/1732289000660/Put/seqid=0 2024-11-22T15:23:21,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742146_1322 (size=12151) 2024-11-22T15:23:21,840 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/d338f9e2f4b8458bafee9ed934a5cc05 2024-11-22T15:23:21,868 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/56df87490711428fbed196971dbec570 is 50, key is test_row_0/C:col10/1732289000660/Put/seqid=0 2024-11-22T15:23:21,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742147_1323 (size=12151) 2024-11-22T15:23:21,932 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:21,933 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-22T15:23:21,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:21,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:21,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 
2024-11-22T15:23:21,933 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:21,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:21,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:21,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-22T15:23:21,990 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:21,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 265 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289061988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:22,090 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:22,091 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-22T15:23:22,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:22,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:22,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:22,091 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:23:22,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:22,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:22,244 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:22,244 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-22T15:23:22,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:22,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:22,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:22,245 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:22,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:22,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:23:22,303 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/56df87490711428fbed196971dbec570 2024-11-22T15:23:22,308 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/c70f798a71b44c4d92253e8af774ea4a as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/c70f798a71b44c4d92253e8af774ea4a 2024-11-22T15:23:22,322 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/c70f798a71b44c4d92253e8af774ea4a, entries=200, sequenceid=248, filesize=14.2 K 2024-11-22T15:23:22,323 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/d338f9e2f4b8458bafee9ed934a5cc05 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/d338f9e2f4b8458bafee9ed934a5cc05 2024-11-22T15:23:22,327 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/d338f9e2f4b8458bafee9ed934a5cc05, entries=150, sequenceid=248, filesize=11.9 K 2024-11-22T15:23:22,328 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/56df87490711428fbed196971dbec570 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/56df87490711428fbed196971dbec570 2024-11-22T15:23:22,332 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/56df87490711428fbed196971dbec570, entries=150, sequenceid=248, filesize=11.9 K 2024-11-22T15:23:22,333 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for db373da2a391c371f2b4fcae935e7eac in 1044ms, sequenceid=248, compaction requested=false 2024-11-22T15:23:22,333 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:22,397 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:22,397 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=80 2024-11-22T15:23:22,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:22,397 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2837): Flushing db373da2a391c371f2b4fcae935e7eac 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-22T15:23:22,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=A 2024-11-22T15:23:22,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:22,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=B 2024-11-22T15:23:22,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:22,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=C 2024-11-22T15:23:22,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:22,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/dde44970600d4a768aa654cdbbabdaa6 is 50, key is test_row_0/A:col10/1732289001372/Put/seqid=0 2024-11-22T15:23:22,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742148_1324 (size=12301) 2024-11-22T15:23:22,438 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=272 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/dde44970600d4a768aa654cdbbabdaa6 2024-11-22T15:23:22,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/bc18ee80de014ddfa1be627533faa96c is 50, key is test_row_0/B:col10/1732289001372/Put/seqid=0 2024-11-22T15:23:22,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742149_1325 (size=12301) 2024-11-22T15:23:22,481 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=272 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/bc18ee80de014ddfa1be627533faa96c 2024-11-22T15:23:22,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/39aaf70d16984785874bd5c9eb6e5f8f is 50, key is test_row_0/C:col10/1732289001372/Put/seqid=0 2024-11-22T15:23:22,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on db373da2a391c371f2b4fcae935e7eac 2024-11-22T15:23:22,497 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:22,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742150_1326 (size=12301) 2024-11-22T15:23:22,526 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=272 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/39aaf70d16984785874bd5c9eb6e5f8f 2024-11-22T15:23:22,537 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:22,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 279 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289062535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:22,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/dde44970600d4a768aa654cdbbabdaa6 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/dde44970600d4a768aa654cdbbabdaa6 2024-11-22T15:23:22,545 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/dde44970600d4a768aa654cdbbabdaa6, entries=150, sequenceid=272, filesize=12.0 K 2024-11-22T15:23:22,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/bc18ee80de014ddfa1be627533faa96c as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/bc18ee80de014ddfa1be627533faa96c 2024-11-22T15:23:22,557 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/bc18ee80de014ddfa1be627533faa96c, entries=150, sequenceid=272, filesize=12.0 K 2024-11-22T15:23:22,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/39aaf70d16984785874bd5c9eb6e5f8f as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/39aaf70d16984785874bd5c9eb6e5f8f 2024-11-22T15:23:22,567 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/39aaf70d16984785874bd5c9eb6e5f8f, entries=150, sequenceid=272, filesize=12.0 K 2024-11-22T15:23:22,568 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for db373da2a391c371f2b4fcae935e7eac in 171ms, sequenceid=272, compaction requested=true 2024-11-22T15:23:22,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2538): Flush status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:22,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:22,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-11-22T15:23:22,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=80 2024-11-22T15:23:22,570 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79 2024-11-22T15:23:22,571 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7070 sec 2024-11-22T15:23:22,572 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees in 1.7120 sec 2024-11-22T15:23:22,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on db373da2a391c371f2b4fcae935e7eac 2024-11-22T15:23:22,641 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing db373da2a391c371f2b4fcae935e7eac 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-22T15:23:22,641 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=A 2024-11-22T15:23:22,641 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:22,641 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=B 2024-11-22T15:23:22,641 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:22,641 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=C 2024-11-22T15:23:22,641 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:22,652 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/6e822e73384c4315934caf3b557368fc is 50, key is test_row_0/A:col10/1732289002640/Put/seqid=0 2024-11-22T15:23:22,674 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742151_1327 (size=14741) 2024-11-22T15:23:22,680 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/6e822e73384c4315934caf3b557368fc 2024-11-22T15:23:22,688 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/2ec15a0a5f274a53931ddb6e61f73e58 is 50, key is test_row_0/B:col10/1732289002640/Put/seqid=0 2024-11-22T15:23:22,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742152_1328 (size=12301) 2024-11-22T15:23:22,704 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/2ec15a0a5f274a53931ddb6e61f73e58 2024-11-22T15:23:22,715 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:22,715 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/0de77d2821eb47edb504ad8a6330518b is 50, key is test_row_0/C:col10/1732289002640/Put/seqid=0 2024-11-22T15:23:22,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 299 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289062713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:22,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742153_1329 (size=12301) 2024-11-22T15:23:22,750 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/0de77d2821eb47edb504ad8a6330518b 2024-11-22T15:23:22,766 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/6e822e73384c4315934caf3b557368fc as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/6e822e73384c4315934caf3b557368fc 2024-11-22T15:23:22,776 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/6e822e73384c4315934caf3b557368fc, entries=200, sequenceid=288, filesize=14.4 K 2024-11-22T15:23:22,777 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/2ec15a0a5f274a53931ddb6e61f73e58 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/2ec15a0a5f274a53931ddb6e61f73e58 2024-11-22T15:23:22,783 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/2ec15a0a5f274a53931ddb6e61f73e58, entries=150, sequenceid=288, filesize=12.0 K 2024-11-22T15:23:22,784 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/0de77d2821eb47edb504ad8a6330518b as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/0de77d2821eb47edb504ad8a6330518b 2024-11-22T15:23:22,790 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/0de77d2821eb47edb504ad8a6330518b, entries=150, sequenceid=288, filesize=12.0 K 2024-11-22T15:23:22,791 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for db373da2a391c371f2b4fcae935e7eac in 150ms, sequenceid=288, compaction requested=true 2024-11-22T15:23:22,792 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:22,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store db373da2a391c371f2b4fcae935e7eac:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:23:22,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:22,792 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T15:23:22,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store db373da2a391c371f2b4fcae935e7eac:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:23:22,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:22,792 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T15:23:22,793 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store db373da2a391c371f2b4fcae935e7eac:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:23:22,793 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:23:22,793 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 54280 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T15:23:22,793 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): db373da2a391c371f2b4fcae935e7eac/A is initiating minor compaction (all files) 2024-11-22T15:23:22,794 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of db373da2a391c371f2b4fcae935e7eac/A in TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 
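The selection messages above show the exploring compaction policy picking four eligible HFiles per store for a minor compaction ("all files", totalSize 53.0 K for A and 48.3 K for B). As a rough illustration of the ratio-based idea behind such selection (a simplified sketch, not HBase's actual ExploringCompactionPolicy; the class, method and parameter names are invented for this example), the standalone Java snippet below scans candidate windows of files and keeps a window only if no file in it is larger than a configurable ratio times the combined size of the other files, preferring windows with more files and then the smaller rewrite:

    import java.util.List;

    /** Simplified, illustrative ratio-based selection; not the real HBase policy. */
    public class RatioSelectionSketch {

      /** Returns [start, end) of the chosen candidate window, or null if nothing qualifies. */
      static int[] select(List<Long> fileSizes, int minFiles, int maxFiles, double ratio) {
        int[] best = null;
        int bestCount = -1;
        long bestTotal = Long.MAX_VALUE;
        for (int start = 0; start < fileSizes.size(); start++) {
          int limit = Math.min(fileSizes.size(), start + maxFiles);
          for (int end = start + minFiles; end <= limit; end++) {
            List<Long> window = fileSizes.subList(start, end);
            long total = window.stream().mapToLong(Long::longValue).sum();
            // Every file must be no larger than ratio * (sum of the other files in the window).
            boolean ok = window.stream().allMatch(s -> s <= ratio * (total - s));
            int count = end - start;
            boolean better = count > bestCount || (count == bestCount && total < bestTotal);
            if (ok && better) {
              bestCount = count;
              bestTotal = total;
              best = new int[] { start, end };
            }
          }
        }
        return best;
      }

      public static void main(String[] args) {
        // Sizes roughly matching the A-store files above: 12.4 K, 14.2 K, 12.0 K, 14.4 K.
        List<Long> sizes = List.of(12700L, 14540L, 12301L, 14741L);
        int[] pick = select(sizes, 3, 10, 1.2);
        System.out.println(pick == null ? "no compaction"
            : "compact files " + pick[0] + ".." + (pick[1] - 1));
      }
    }
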
2024-11-22T15:23:22,794 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/803d42be51c141dbb2954bac00793ac0, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/c70f798a71b44c4d92253e8af774ea4a, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/dde44970600d4a768aa654cdbbabdaa6, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/6e822e73384c4315934caf3b557368fc] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp, totalSize=53.0 K 2024-11-22T15:23:22,794 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49450 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T15:23:22,794 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): db373da2a391c371f2b4fcae935e7eac/B is initiating minor compaction (all files) 2024-11-22T15:23:22,794 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of db373da2a391c371f2b4fcae935e7eac/B in TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:22,794 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/29f5540501f5404fbd45a11d16aba0d9, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/d338f9e2f4b8458bafee9ed934a5cc05, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/bc18ee80de014ddfa1be627533faa96c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/2ec15a0a5f274a53931ddb6e61f73e58] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp, totalSize=48.3 K 2024-11-22T15:23:22,795 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 29f5540501f5404fbd45a11d16aba0d9, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1732289000007 2024-11-22T15:23:22,796 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 803d42be51c141dbb2954bac00793ac0, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1732289000007 2024-11-22T15:23:22,796 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting d338f9e2f4b8458bafee9ed934a5cc05, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, 
earliestPutTs=1732289000660 2024-11-22T15:23:22,796 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting c70f798a71b44c4d92253e8af774ea4a, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732289000660 2024-11-22T15:23:22,797 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting bc18ee80de014ddfa1be627533faa96c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1732289001344 2024-11-22T15:23:22,797 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 2ec15a0a5f274a53931ddb6e61f73e58, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732289002504 2024-11-22T15:23:22,798 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting dde44970600d4a768aa654cdbbabdaa6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1732289001344 2024-11-22T15:23:22,798 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6e822e73384c4315934caf3b557368fc, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732289002504 2024-11-22T15:23:22,818 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): db373da2a391c371f2b4fcae935e7eac#B#compaction#284 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:22,819 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/907bd924ee3741bab234c7dade422657 is 50, key is test_row_0/B:col10/1732289002640/Put/seqid=0 2024-11-22T15:23:22,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on db373da2a391c371f2b4fcae935e7eac 2024-11-22T15:23:22,822 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing db373da2a391c371f2b4fcae935e7eac 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-22T15:23:22,822 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=A 2024-11-22T15:23:22,822 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:22,822 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=B 2024-11-22T15:23:22,822 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:22,822 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=C 2024-11-22T15:23:22,822 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:22,831 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): db373da2a391c371f2b4fcae935e7eac#A#compaction#285 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:22,832 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/ac624757eb554caf9629c4fdb1231ea8 is 50, key is test_row_0/A:col10/1732289002640/Put/seqid=0 2024-11-22T15:23:22,849 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/55623d1cfac54b3eb9761e6932907718 is 50, key is test_row_0/A:col10/1732289002820/Put/seqid=0 2024-11-22T15:23:22,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742154_1330 (size=12983) 2024-11-22T15:23:22,894 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:22,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 314 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289062891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:22,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742155_1331 (size=12983) 2024-11-22T15:23:22,923 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/ac624757eb554caf9629c4fdb1231ea8 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/ac624757eb554caf9629c4fdb1231ea8 2024-11-22T15:23:22,931 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in db373da2a391c371f2b4fcae935e7eac/A of db373da2a391c371f2b4fcae935e7eac into ac624757eb554caf9629c4fdb1231ea8(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
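The RegionTooBusyException entries above are raised by HRegion.checkResources() once the region's memstore passes its blocking limit (512.0 K here), and the mutate is rejected before it is applied. It is an IOException that the stock HBase client normally retries on the caller's behalf; the sketch below shows a hypothetical writer doing its own backoff instead, assuming client-side retries are disabled so the server exception reaches the application directly (the backoff values are arbitrary):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetrySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          for (int attempt = 1; ; attempt++) {
            try {
              table.put(put);
              break; // write accepted
            } catch (RegionTooBusyException busy) {
              // Memstore over its blocking limit; back off and let flushes/compactions catch up.
              if (attempt >= 10) throw busy;
              Thread.sleep(100L * attempt);
            }
          }
        }
      }
    }
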
2024-11-22T15:23:22,932 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:22,932 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac., storeName=db373da2a391c371f2b4fcae935e7eac/A, priority=12, startTime=1732289002792; duration=0sec 2024-11-22T15:23:22,932 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:23:22,932 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: db373da2a391c371f2b4fcae935e7eac:A 2024-11-22T15:23:22,932 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T15:23:22,933 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49450 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T15:23:22,933 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): db373da2a391c371f2b4fcae935e7eac/C is initiating minor compaction (all files) 2024-11-22T15:23:22,933 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of db373da2a391c371f2b4fcae935e7eac/C in TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:22,934 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/f9e24c6436634b04a398ad0c7e3e7c13, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/56df87490711428fbed196971dbec570, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/39aaf70d16984785874bd5c9eb6e5f8f, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/0de77d2821eb47edb504ad8a6330518b] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp, totalSize=48.3 K 2024-11-22T15:23:22,934 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting f9e24c6436634b04a398ad0c7e3e7c13, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1732289000007 2024-11-22T15:23:22,934 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 56df87490711428fbed196971dbec570, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732289000660 2024-11-22T15:23:22,935 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 39aaf70d16984785874bd5c9eb6e5f8f, keycount=150, bloomtype=ROW, size=12.0 K, 
encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1732289001344 2024-11-22T15:23:22,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742156_1332 (size=14741) 2024-11-22T15:23:22,935 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0de77d2821eb47edb504ad8a6330518b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732289002504 2024-11-22T15:23:22,961 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): db373da2a391c371f2b4fcae935e7eac#C#compaction#287 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:22,962 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/e825f40dce584b6f98b703548b5eb9bd is 50, key is test_row_0/C:col10/1732289002640/Put/seqid=0 2024-11-22T15:23:22,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-22T15:23:22,967 INFO [Thread-1241 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 79 completed 2024-11-22T15:23:22,968 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:23:22,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees 2024-11-22T15:23:22,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-22T15:23:22,969 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:23:22,970 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:23:22,971 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:23:22,992 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-22T15:23:22,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742157_1333 (size=12983) 2024-11-22T15:23:23,005 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:23,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 316 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289062999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:23,007 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/e825f40dce584b6f98b703548b5eb9bd as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/e825f40dce584b6f98b703548b5eb9bd 2024-11-22T15:23:23,014 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in db373da2a391c371f2b4fcae935e7eac/C of db373da2a391c371f2b4fcae935e7eac into e825f40dce584b6f98b703548b5eb9bd(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
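The FLUSH operation reported as completed for procId 79 above (and the follow-up request stored as pid=81) is driven from the test client through the Admin API: the master wraps each request in a FlushTableProcedure with one FlushRegionProcedure per region, which is the pid=79/80 pair that finished earlier in this log. A minimal way to issue the same request with the standard client, assuming the configuration on the classpath points at this mini-cluster:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Blocks until the master-side flush procedure and its per-region subprocedures finish.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }
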
2024-11-22T15:23:23,014 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:23,014 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac., storeName=db373da2a391c371f2b4fcae935e7eac/C, priority=12, startTime=1732289002792; duration=0sec 2024-11-22T15:23:23,014 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:23,015 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: db373da2a391c371f2b4fcae935e7eac:C 2024-11-22T15:23:23,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-22T15:23:23,130 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:23,130 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-22T15:23:23,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:23,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:23,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:23,130 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:23:23,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:23,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:23,208 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:23,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 318 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289063207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:23,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-22T15:23:23,283 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:23,284 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-22T15:23:23,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:23,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:23,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 
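The repeated "Over memstore limit=512.0 K" rejections above come from the per-region blocking threshold, which is the memstore flush size multiplied by a block multiplier; the test presumably lowers these settings so concurrent writers hit the limit quickly while flushes and compactions are still in flight. A sketch of the two relevant configuration keys (the individual values below are assumptions; only their 512 K product is visible in the log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Memstore size at which a flush is requested (stock default is 128 MB).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);   // assumed test value: 128 K
        // Writes are rejected with RegionTooBusyException once the memstore reaches
        // flush.size * multiplier: 128 K * 4 = 512 K, matching the limit in this log.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);        // assumed test value
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
        System.out.println("blocking limit = " + blockingLimit + " bytes"); // 524288
      }
    }
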
2024-11-22T15:23:23,284 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:23,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:23,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:23,294 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/907bd924ee3741bab234c7dade422657 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/907bd924ee3741bab234c7dade422657 2024-11-22T15:23:23,298 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in db373da2a391c371f2b4fcae935e7eac/B of db373da2a391c371f2b4fcae935e7eac into 907bd924ee3741bab234c7dade422657(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:23:23,298 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:23,298 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac., storeName=db373da2a391c371f2b4fcae935e7eac/B, priority=12, startTime=1732289002792; duration=0sec 2024-11-22T15:23:23,298 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:23,298 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: db373da2a391c371f2b4fcae935e7eac:B 2024-11-22T15:23:23,336 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=309 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/55623d1cfac54b3eb9761e6932907718 2024-11-22T15:23:23,345 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/7a8b93dc5a8841bc94a31d8f6b884ace is 50, key is test_row_0/B:col10/1732289002820/Put/seqid=0 2024-11-22T15:23:23,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742158_1334 (size=12301) 2024-11-22T15:23:23,371 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=309 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/7a8b93dc5a8841bc94a31d8f6b884ace 2024-11-22T15:23:23,381 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/40d6145fee874e1d86b48b91e8b1283a is 50, key is test_row_0/C:col10/1732289002820/Put/seqid=0 2024-11-22T15:23:23,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742159_1335 (size=12301) 2024-11-22T15:23:23,414 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=309 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/40d6145fee874e1d86b48b91e8b1283a 2024-11-22T15:23:23,425 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/55623d1cfac54b3eb9761e6932907718 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/55623d1cfac54b3eb9761e6932907718 2024-11-22T15:23:23,433 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/55623d1cfac54b3eb9761e6932907718, entries=200, sequenceid=309, filesize=14.4 K 2024-11-22T15:23:23,435 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/7a8b93dc5a8841bc94a31d8f6b884ace as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/7a8b93dc5a8841bc94a31d8f6b884ace 2024-11-22T15:23:23,435 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:23,435 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-22T15:23:23,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:23,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:23,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:23,436 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
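Throughout this log every flush writes matching cells to the A, B and C families of the same rows (for example test_row_0/A:col10, B:col10 and C:col10 above, all at one timestamp and sequence id), because the test's writers update all three families in a single Put; HBase applies a Put to one row atomically, and that row-level atomicity is the property the concurrent flushes and compactions here must not break. A small sketch of such a writer, assuming a Table handle for TestAcidGuarantees obtained as in the earlier sketches:

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AtomicRowWriteSketch {
      /** Writes the same value to col10 in families A, B and C of one row in a single, atomic Put. */
      static void writeRow(Table table, String row, long value) throws IOException {
        byte[] v = Bytes.toBytes(value);
        Put put = new Put(Bytes.toBytes(row));
        for (String family : new String[] { "A", "B", "C" }) {
          put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"), v);
        }
        table.put(put); // a reader must never observe this row with mixed values across families
      }
    }
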
2024-11-22T15:23:23,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:23,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:23,447 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/7a8b93dc5a8841bc94a31d8f6b884ace, entries=150, sequenceid=309, filesize=12.0 K 2024-11-22T15:23:23,448 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/40d6145fee874e1d86b48b91e8b1283a as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/40d6145fee874e1d86b48b91e8b1283a 2024-11-22T15:23:23,454 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/40d6145fee874e1d86b48b91e8b1283a, entries=150, sequenceid=309, filesize=12.0 K 2024-11-22T15:23:23,455 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for db373da2a391c371f2b4fcae935e7eac in 633ms, sequenceid=309, compaction requested=false 2024-11-22T15:23:23,455 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:23,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on db373da2a391c371f2b4fcae935e7eac 2024-11-22T15:23:23,512 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing db373da2a391c371f2b4fcae935e7eac 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-22T15:23:23,512 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=A 2024-11-22T15:23:23,512 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:23,512 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=B 2024-11-22T15:23:23,512 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:23,512 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=C 2024-11-22T15:23:23,512 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:23,518 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/485fb5b2233d4386a3e80b7c5ab86311 is 50, key is test_row_0/A:col10/1732289003510/Put/seqid=0 2024-11-22T15:23:23,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742160_1336 (size=12301) 
2024-11-22T15:23:23,532 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/485fb5b2233d4386a3e80b7c5ab86311 2024-11-22T15:23:23,552 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/231c8b602e4b4e5aad5e554da3f23808 is 50, key is test_row_0/B:col10/1732289003510/Put/seqid=0 2024-11-22T15:23:23,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-22T15:23:23,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742161_1337 (size=12301) 2024-11-22T15:23:23,591 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:23,591 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:23,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 338 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289063589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:23,592 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-22T15:23:23,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:23,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:23,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:23,593 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:23:23,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:23,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:23,697 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:23,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 340 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289063693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:23,744 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:23,745 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-22T15:23:23,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:23,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:23,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 
2024-11-22T15:23:23,745 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:23,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:23,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:23,897 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:23,897 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-22T15:23:23,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:23,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:23,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:23,898 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:23,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:23,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:23,902 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:23,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 342 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289063900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:23,986 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/231c8b602e4b4e5aad5e554da3f23808 2024-11-22T15:23:24,011 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/61546be3a9a040bcb9c568325006d48b is 50, key is test_row_0/C:col10/1732289003510/Put/seqid=0 2024-11-22T15:23:24,050 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:24,051 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-22T15:23:24,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:24,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:24,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:24,051 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:24,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:24,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:24,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-22T15:23:24,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742162_1338 (size=12301) 2024-11-22T15:23:24,205 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:24,205 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:24,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 344 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289064205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:24,205 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-22T15:23:24,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:24,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:24,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:24,206 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:23:24,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:24,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:24,358 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:24,358 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-22T15:23:24,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:24,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:24,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:24,360 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:24,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:24,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:23:24,500 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/61546be3a9a040bcb9c568325006d48b 2024-11-22T15:23:24,506 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/485fb5b2233d4386a3e80b7c5ab86311 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/485fb5b2233d4386a3e80b7c5ab86311 2024-11-22T15:23:24,510 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/485fb5b2233d4386a3e80b7c5ab86311, entries=150, sequenceid=328, filesize=12.0 K 2024-11-22T15:23:24,513 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/231c8b602e4b4e5aad5e554da3f23808 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/231c8b602e4b4e5aad5e554da3f23808 2024-11-22T15:23:24,519 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/231c8b602e4b4e5aad5e554da3f23808, entries=150, sequenceid=328, filesize=12.0 K 2024-11-22T15:23:24,519 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:24,519 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-22T15:23:24,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:24,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:24,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 
2024-11-22T15:23:24,520 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:24,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:24,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:24,523 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/61546be3a9a040bcb9c568325006d48b as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/61546be3a9a040bcb9c568325006d48b 2024-11-22T15:23:24,529 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/61546be3a9a040bcb9c568325006d48b, entries=150, sequenceid=328, filesize=12.0 K 2024-11-22T15:23:24,531 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for db373da2a391c371f2b4fcae935e7eac in 1018ms, sequenceid=328, compaction requested=true 2024-11-22T15:23:24,531 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:24,531 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:23:24,533 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40025 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:23:24,533 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): db373da2a391c371f2b4fcae935e7eac/A is initiating minor compaction (all files) 2024-11-22T15:23:24,533 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] 
regionserver.HRegion(2351): Starting compaction of db373da2a391c371f2b4fcae935e7eac/A in TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:24,533 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/ac624757eb554caf9629c4fdb1231ea8, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/55623d1cfac54b3eb9761e6932907718, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/485fb5b2233d4386a3e80b7c5ab86311] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp, totalSize=39.1 K 2024-11-22T15:23:24,533 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting ac624757eb554caf9629c4fdb1231ea8, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732289002504 2024-11-22T15:23:24,534 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 55623d1cfac54b3eb9761e6932907718, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=309, earliestPutTs=1732289002702 2024-11-22T15:23:24,534 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 485fb5b2233d4386a3e80b7c5ab86311, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1732289002861 2024-11-22T15:23:24,534 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store db373da2a391c371f2b4fcae935e7eac:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:23:24,534 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:24,534 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:23:24,536 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:23:24,536 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): db373da2a391c371f2b4fcae935e7eac/B is initiating minor compaction (all files) 2024-11-22T15:23:24,536 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of db373da2a391c371f2b4fcae935e7eac/B in TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 
2024-11-22T15:23:24,536 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/907bd924ee3741bab234c7dade422657, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/7a8b93dc5a8841bc94a31d8f6b884ace, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/231c8b602e4b4e5aad5e554da3f23808] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp, totalSize=36.7 K 2024-11-22T15:23:24,536 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 907bd924ee3741bab234c7dade422657, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732289002504 2024-11-22T15:23:24,537 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 7a8b93dc5a8841bc94a31d8f6b884ace, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=309, earliestPutTs=1732289002706 2024-11-22T15:23:24,538 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 231c8b602e4b4e5aad5e554da3f23808, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1732289002861 2024-11-22T15:23:24,542 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store db373da2a391c371f2b4fcae935e7eac:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:23:24,542 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:24,544 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store db373da2a391c371f2b4fcae935e7eac:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:23:24,544 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:23:24,552 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): db373da2a391c371f2b4fcae935e7eac#A#compaction#293 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:24,553 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/1b79cffdbf684b5fa2ef1685a19383cc is 50, key is test_row_0/A:col10/1732289003510/Put/seqid=0 2024-11-22T15:23:24,566 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): db373da2a391c371f2b4fcae935e7eac#B#compaction#294 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:24,566 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/9da7d7ede3d545cdaa7daf0664aea685 is 50, key is test_row_0/B:col10/1732289003510/Put/seqid=0 2024-11-22T15:23:24,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742163_1339 (size=13085) 2024-11-22T15:23:24,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742164_1340 (size=13085) 2024-11-22T15:23:24,622 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/9da7d7ede3d545cdaa7daf0664aea685 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/9da7d7ede3d545cdaa7daf0664aea685 2024-11-22T15:23:24,628 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in db373da2a391c371f2b4fcae935e7eac/B of db373da2a391c371f2b4fcae935e7eac into 9da7d7ede3d545cdaa7daf0664aea685(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:23:24,628 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:24,629 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac., storeName=db373da2a391c371f2b4fcae935e7eac/B, priority=13, startTime=1732289004534; duration=0sec 2024-11-22T15:23:24,629 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:23:24,629 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: db373da2a391c371f2b4fcae935e7eac:B 2024-11-22T15:23:24,629 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:23:24,631 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:23:24,631 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): db373da2a391c371f2b4fcae935e7eac/C is initiating minor compaction (all files) 2024-11-22T15:23:24,631 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of db373da2a391c371f2b4fcae935e7eac/C in TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 
2024-11-22T15:23:24,631 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/e825f40dce584b6f98b703548b5eb9bd, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/40d6145fee874e1d86b48b91e8b1283a, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/61546be3a9a040bcb9c568325006d48b] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp, totalSize=36.7 K 2024-11-22T15:23:24,631 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting e825f40dce584b6f98b703548b5eb9bd, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732289002504 2024-11-22T15:23:24,631 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 40d6145fee874e1d86b48b91e8b1283a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=309, earliestPutTs=1732289002706 2024-11-22T15:23:24,632 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 61546be3a9a040bcb9c568325006d48b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1732289002861 2024-11-22T15:23:24,654 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): db373da2a391c371f2b4fcae935e7eac#C#compaction#295 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:24,654 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/f5214916c71548aa87568cb9315f52c6 is 50, key is test_row_0/C:col10/1732289003510/Put/seqid=0 2024-11-22T15:23:24,672 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:24,672 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-22T15:23:24,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 
2024-11-22T15:23:24,672 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2837): Flushing db373da2a391c371f2b4fcae935e7eac 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-22T15:23:24,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=A 2024-11-22T15:23:24,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:24,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=B 2024-11-22T15:23:24,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:24,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=C 2024-11-22T15:23:24,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:24,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742165_1341 (size=13085) 2024-11-22T15:23:24,683 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/f5214916c71548aa87568cb9315f52c6 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/f5214916c71548aa87568cb9315f52c6 2024-11-22T15:23:24,692 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in db373da2a391c371f2b4fcae935e7eac/C of db373da2a391c371f2b4fcae935e7eac into f5214916c71548aa87568cb9315f52c6(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:23:24,692 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:24,692 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac., storeName=db373da2a391c371f2b4fcae935e7eac/C, priority=13, startTime=1732289004542; duration=0sec 2024-11-22T15:23:24,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/ba0ba98431c3465bbc0329522ab5bd7a is 50, key is test_row_0/A:col10/1732289003585/Put/seqid=0 2024-11-22T15:23:24,698 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:24,698 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: db373da2a391c371f2b4fcae935e7eac:C 2024-11-22T15:23:24,711 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:24,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on db373da2a391c371f2b4fcae935e7eac 2024-11-22T15:23:24,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742166_1342 (size=12301) 2024-11-22T15:23:24,748 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:24,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 359 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289064747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:24,851 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:24,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 361 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289064849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:25,019 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/1b79cffdbf684b5fa2ef1685a19383cc as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/1b79cffdbf684b5fa2ef1685a19383cc 2024-11-22T15:23:25,027 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in db373da2a391c371f2b4fcae935e7eac/A of db373da2a391c371f2b4fcae935e7eac into 1b79cffdbf684b5fa2ef1685a19383cc(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:23:25,027 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:25,027 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac., storeName=db373da2a391c371f2b4fcae935e7eac/A, priority=13, startTime=1732289004531; duration=0sec 2024-11-22T15:23:25,028 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:25,028 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: db373da2a391c371f2b4fcae935e7eac:A 2024-11-22T15:23:25,054 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:25,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 363 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289065052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:25,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-22T15:23:25,124 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=349 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/ba0ba98431c3465bbc0329522ab5bd7a 2024-11-22T15:23:25,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/d538d4cda99344fe891481af71762d67 is 50, key is test_row_0/B:col10/1732289003585/Put/seqid=0 2024-11-22T15:23:25,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742167_1343 (size=12301) 2024-11-22T15:23:25,178 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=349 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/d538d4cda99344fe891481af71762d67 2024-11-22T15:23:25,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/908b025b95704978b738c6a7ef89520b is 50, key is test_row_0/C:col10/1732289003585/Put/seqid=0 2024-11-22T15:23:25,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742168_1344 (size=12301) 2024-11-22T15:23:25,218 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=349 (bloomFilter=true), 
to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/908b025b95704978b738c6a7ef89520b 2024-11-22T15:23:25,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/ba0ba98431c3465bbc0329522ab5bd7a as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/ba0ba98431c3465bbc0329522ab5bd7a 2024-11-22T15:23:25,231 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/ba0ba98431c3465bbc0329522ab5bd7a, entries=150, sequenceid=349, filesize=12.0 K 2024-11-22T15:23:25,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/d538d4cda99344fe891481af71762d67 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/d538d4cda99344fe891481af71762d67 2024-11-22T15:23:25,244 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/d538d4cda99344fe891481af71762d67, entries=150, sequenceid=349, filesize=12.0 K 2024-11-22T15:23:25,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/908b025b95704978b738c6a7ef89520b as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/908b025b95704978b738c6a7ef89520b 2024-11-22T15:23:25,255 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/908b025b95704978b738c6a7ef89520b, entries=150, sequenceid=349, filesize=12.0 K 2024-11-22T15:23:25,256 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for db373da2a391c371f2b4fcae935e7eac in 584ms, sequenceid=349, compaction requested=false 2024-11-22T15:23:25,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2538): Flush status journal for 
db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:25,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:25,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-11-22T15:23:25,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=82 2024-11-22T15:23:25,260 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-11-22T15:23:25,260 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2870 sec 2024-11-22T15:23:25,261 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees in 2.2920 sec 2024-11-22T15:23:25,359 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing db373da2a391c371f2b4fcae935e7eac 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-22T15:23:25,360 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=A 2024-11-22T15:23:25,360 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:25,360 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=B 2024-11-22T15:23:25,360 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:25,360 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=C 2024-11-22T15:23:25,360 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:25,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on db373da2a391c371f2b4fcae935e7eac 2024-11-22T15:23:25,364 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/9d37099f26ef42699cd26cb652e90cbf is 50, key is test_row_0/A:col10/1732289005358/Put/seqid=0 2024-11-22T15:23:25,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742169_1345 (size=14741) 2024-11-22T15:23:25,432 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:25,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 382 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289065428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:25,534 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:25,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 384 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289065534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:25,737 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:25,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 386 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289065735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:25,806 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=368 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/9d37099f26ef42699cd26cb652e90cbf 2024-11-22T15:23:25,816 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/7392c2e136f14b49baa40a4112755b9a is 50, key is test_row_0/B:col10/1732289005358/Put/seqid=0 2024-11-22T15:23:25,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742170_1346 (size=12301) 2024-11-22T15:23:25,848 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=368 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/7392c2e136f14b49baa40a4112755b9a 2024-11-22T15:23:25,862 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/36eaa7768dfb4941b1cf289723e6db18 is 50, key is test_row_0/C:col10/1732289005358/Put/seqid=0 2024-11-22T15:23:25,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742171_1347 (size=12301) 2024-11-22T15:23:25,894 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=368 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/36eaa7768dfb4941b1cf289723e6db18 2024-11-22T15:23:25,901 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/9d37099f26ef42699cd26cb652e90cbf as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/9d37099f26ef42699cd26cb652e90cbf 2024-11-22T15:23:25,908 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/9d37099f26ef42699cd26cb652e90cbf, entries=200, sequenceid=368, filesize=14.4 K 2024-11-22T15:23:25,909 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/7392c2e136f14b49baa40a4112755b9a as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/7392c2e136f14b49baa40a4112755b9a 2024-11-22T15:23:25,920 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/7392c2e136f14b49baa40a4112755b9a, entries=150, sequenceid=368, filesize=12.0 K 2024-11-22T15:23:25,921 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/36eaa7768dfb4941b1cf289723e6db18 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/36eaa7768dfb4941b1cf289723e6db18 2024-11-22T15:23:25,926 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/36eaa7768dfb4941b1cf289723e6db18, entries=150, sequenceid=368, filesize=12.0 K 2024-11-22T15:23:25,927 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for db373da2a391c371f2b4fcae935e7eac in 567ms, sequenceid=368, compaction requested=true 2024-11-22T15:23:25,927 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:25,927 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store db373da2a391c371f2b4fcae935e7eac:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:23:25,927 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), 
splitQueue=0 2024-11-22T15:23:25,927 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:23:25,927 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store db373da2a391c371f2b4fcae935e7eac:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:23:25,927 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:25,927 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:23:25,927 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store db373da2a391c371f2b4fcae935e7eac:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:23:25,927 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:23:25,928 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40127 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:23:25,928 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): db373da2a391c371f2b4fcae935e7eac/A is initiating minor compaction (all files) 2024-11-22T15:23:25,928 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of db373da2a391c371f2b4fcae935e7eac/A in TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:25,928 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/1b79cffdbf684b5fa2ef1685a19383cc, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/ba0ba98431c3465bbc0329522ab5bd7a, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/9d37099f26ef42699cd26cb652e90cbf] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp, totalSize=39.2 K 2024-11-22T15:23:25,928 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:23:25,928 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): db373da2a391c371f2b4fcae935e7eac/B is initiating minor compaction (all files) 2024-11-22T15:23:25,928 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of db373da2a391c371f2b4fcae935e7eac/B in TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 
2024-11-22T15:23:25,928 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/9da7d7ede3d545cdaa7daf0664aea685, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/d538d4cda99344fe891481af71762d67, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/7392c2e136f14b49baa40a4112755b9a] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp, totalSize=36.8 K 2024-11-22T15:23:25,929 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1b79cffdbf684b5fa2ef1685a19383cc, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1732289002861 2024-11-22T15:23:25,929 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 9da7d7ede3d545cdaa7daf0664aea685, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1732289002861 2024-11-22T15:23:25,929 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting ba0ba98431c3465bbc0329522ab5bd7a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=349, earliestPutTs=1732289003572 2024-11-22T15:23:25,930 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting d538d4cda99344fe891481af71762d67, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=349, earliestPutTs=1732289003572 2024-11-22T15:23:25,931 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 7392c2e136f14b49baa40a4112755b9a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1732289004735 2024-11-22T15:23:25,931 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9d37099f26ef42699cd26cb652e90cbf, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1732289004731 2024-11-22T15:23:25,954 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): db373da2a391c371f2b4fcae935e7eac#A#compaction#302 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:25,954 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/ca652fbe365941b0af64a11e8bb630a9 is 50, key is test_row_0/A:col10/1732289005358/Put/seqid=0 2024-11-22T15:23:25,957 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): db373da2a391c371f2b4fcae935e7eac#B#compaction#303 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:25,958 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/cee4e75282c54468a195594b1f652892 is 50, key is test_row_0/B:col10/1732289005358/Put/seqid=0 2024-11-22T15:23:25,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742172_1348 (size=13187) 2024-11-22T15:23:25,999 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/ca652fbe365941b0af64a11e8bb630a9 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/ca652fbe365941b0af64a11e8bb630a9 2024-11-22T15:23:26,008 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in db373da2a391c371f2b4fcae935e7eac/A of db373da2a391c371f2b4fcae935e7eac into ca652fbe365941b0af64a11e8bb630a9(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:23:26,008 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:26,008 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac., storeName=db373da2a391c371f2b4fcae935e7eac/A, priority=13, startTime=1732289005927; duration=0sec 2024-11-22T15:23:26,008 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:23:26,008 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: db373da2a391c371f2b4fcae935e7eac:A 2024-11-22T15:23:26,008 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:23:26,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742173_1349 (size=13187) 2024-11-22T15:23:26,012 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:23:26,012 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): db373da2a391c371f2b4fcae935e7eac/C is initiating minor compaction (all files) 2024-11-22T15:23:26,012 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of db373da2a391c371f2b4fcae935e7eac/C in TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 
2024-11-22T15:23:26,013 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/f5214916c71548aa87568cb9315f52c6, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/908b025b95704978b738c6a7ef89520b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/36eaa7768dfb4941b1cf289723e6db18] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp, totalSize=36.8 K 2024-11-22T15:23:26,013 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting f5214916c71548aa87568cb9315f52c6, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1732289002861 2024-11-22T15:23:26,014 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 908b025b95704978b738c6a7ef89520b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=349, earliestPutTs=1732289003572 2024-11-22T15:23:26,014 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 36eaa7768dfb4941b1cf289723e6db18, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1732289004735 2024-11-22T15:23:26,022 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/cee4e75282c54468a195594b1f652892 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/cee4e75282c54468a195594b1f652892 2024-11-22T15:23:26,029 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): db373da2a391c371f2b4fcae935e7eac#C#compaction#304 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:26,029 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/b6b1424364364b2ab2312944d7d1924b is 50, key is test_row_0/C:col10/1732289005358/Put/seqid=0 2024-11-22T15:23:26,030 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in db373da2a391c371f2b4fcae935e7eac/B of db373da2a391c371f2b4fcae935e7eac into cee4e75282c54468a195594b1f652892(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:23:26,030 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:26,030 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac., storeName=db373da2a391c371f2b4fcae935e7eac/B, priority=13, startTime=1732289005927; duration=0sec 2024-11-22T15:23:26,030 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:26,030 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: db373da2a391c371f2b4fcae935e7eac:B 2024-11-22T15:23:26,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on db373da2a391c371f2b4fcae935e7eac 2024-11-22T15:23:26,042 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing db373da2a391c371f2b4fcae935e7eac 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-22T15:23:26,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=A 2024-11-22T15:23:26,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:26,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=B 2024-11-22T15:23:26,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:26,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=C 2024-11-22T15:23:26,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:26,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742174_1350 (size=13187) 2024-11-22T15:23:26,072 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/e0f2f6069206498c927883bd72b37254 is 50, key is test_row_0/A:col10/1732289005422/Put/seqid=0 2024-11-22T15:23:26,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742175_1351 (size=14741) 2024-11-22T15:23:26,099 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=390 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/e0f2f6069206498c927883bd72b37254 2024-11-22T15:23:26,100 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:26,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 402 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289066097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:26,131 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/759e9e1f7f1c4b38954bb236230020af is 50, key is test_row_0/B:col10/1732289005422/Put/seqid=0 2024-11-22T15:23:26,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742176_1352 (size=12301) 2024-11-22T15:23:26,202 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:26,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 404 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289066201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:26,405 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:26,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 406 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289066403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:26,474 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/b6b1424364364b2ab2312944d7d1924b as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/b6b1424364364b2ab2312944d7d1924b 2024-11-22T15:23:26,479 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in db373da2a391c371f2b4fcae935e7eac/C of db373da2a391c371f2b4fcae935e7eac into b6b1424364364b2ab2312944d7d1924b(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:23:26,479 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:26,479 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac., storeName=db373da2a391c371f2b4fcae935e7eac/C, priority=13, startTime=1732289005927; duration=0sec 2024-11-22T15:23:26,479 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:26,479 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: db373da2a391c371f2b4fcae935e7eac:C 2024-11-22T15:23:26,551 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=390 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/759e9e1f7f1c4b38954bb236230020af 2024-11-22T15:23:26,562 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/98ff1d60b283469198f03c4106339dee is 50, key is test_row_0/C:col10/1732289005422/Put/seqid=0 2024-11-22T15:23:26,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742177_1353 (size=12301) 2024-11-22T15:23:26,591 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=390 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/98ff1d60b283469198f03c4106339dee 2024-11-22T15:23:26,598 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/e0f2f6069206498c927883bd72b37254 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/e0f2f6069206498c927883bd72b37254 2024-11-22T15:23:26,603 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/e0f2f6069206498c927883bd72b37254, entries=200, sequenceid=390, filesize=14.4 K 2024-11-22T15:23:26,604 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/759e9e1f7f1c4b38954bb236230020af as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/759e9e1f7f1c4b38954bb236230020af 2024-11-22T15:23:26,610 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/759e9e1f7f1c4b38954bb236230020af, entries=150, sequenceid=390, filesize=12.0 K 2024-11-22T15:23:26,610 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/98ff1d60b283469198f03c4106339dee as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/98ff1d60b283469198f03c4106339dee 2024-11-22T15:23:26,616 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/98ff1d60b283469198f03c4106339dee, entries=150, sequenceid=390, filesize=12.0 K 2024-11-22T15:23:26,617 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for db373da2a391c371f2b4fcae935e7eac in 575ms, sequenceid=390, compaction requested=false 2024-11-22T15:23:26,617 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:26,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on db373da2a391c371f2b4fcae935e7eac 2024-11-22T15:23:26,709 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing db373da2a391c371f2b4fcae935e7eac 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-22T15:23:26,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=A 2024-11-22T15:23:26,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:26,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=B 2024-11-22T15:23:26,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:26,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=C 2024-11-22T15:23:26,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:26,736 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/79503c4e4df64e6181b1abf1f215be0f is 50, key is test_row_0/A:col10/1732289006091/Put/seqid=0 2024-11-22T15:23:26,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742178_1354 (size=12301) 2024-11-22T15:23:26,778 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=408 (bloomFilter=true), 
to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/79503c4e4df64e6181b1abf1f215be0f 2024-11-22T15:23:26,788 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/65c1d46ea2ce43eb86131395e5631b84 is 50, key is test_row_0/B:col10/1732289006091/Put/seqid=0 2024-11-22T15:23:26,790 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:26,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 425 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289066785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:26,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742179_1355 (size=12301) 2024-11-22T15:23:26,816 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=408 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/65c1d46ea2ce43eb86131395e5631b84 2024-11-22T15:23:26,828 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/41cb42d030764e999b54f839d7866fc0 is 50, key is test_row_0/C:col10/1732289006091/Put/seqid=0 2024-11-22T15:23:26,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742180_1356 (size=12301) 2024-11-22T15:23:26,888 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=408 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/41cb42d030764e999b54f839d7866fc0 2024-11-22T15:23:26,894 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:26,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 427 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289066892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:26,899 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/79503c4e4df64e6181b1abf1f215be0f as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/79503c4e4df64e6181b1abf1f215be0f 2024-11-22T15:23:26,908 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/79503c4e4df64e6181b1abf1f215be0f, entries=150, sequenceid=408, filesize=12.0 K 2024-11-22T15:23:26,908 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/65c1d46ea2ce43eb86131395e5631b84 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/65c1d46ea2ce43eb86131395e5631b84 2024-11-22T15:23:26,912 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/65c1d46ea2ce43eb86131395e5631b84, entries=150, sequenceid=408, filesize=12.0 K 2024-11-22T15:23:26,913 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/41cb42d030764e999b54f839d7866fc0 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/41cb42d030764e999b54f839d7866fc0 2024-11-22T15:23:26,920 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/41cb42d030764e999b54f839d7866fc0, entries=150, sequenceid=408, filesize=12.0 K 2024-11-22T15:23:26,921 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for db373da2a391c371f2b4fcae935e7eac in 212ms, sequenceid=408, compaction requested=true 2024-11-22T15:23:26,921 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:26,921 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store db373da2a391c371f2b4fcae935e7eac:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:23:26,921 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:26,921 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:23:26,921 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store db373da2a391c371f2b4fcae935e7eac:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:23:26,921 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:26,921 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:23:26,922 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store db373da2a391c371f2b4fcae935e7eac:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:23:26,922 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:23:26,922 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40229 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:23:26,922 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:23:26,922 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): db373da2a391c371f2b4fcae935e7eac/A is initiating minor compaction (all files) 2024-11-22T15:23:26,922 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): db373da2a391c371f2b4fcae935e7eac/B is initiating minor compaction (all files) 2024-11-22T15:23:26,922 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of db373da2a391c371f2b4fcae935e7eac/B in TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:26,922 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of db373da2a391c371f2b4fcae935e7eac/A in TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:26,922 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/cee4e75282c54468a195594b1f652892, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/759e9e1f7f1c4b38954bb236230020af, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/65c1d46ea2ce43eb86131395e5631b84] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp, totalSize=36.9 K 2024-11-22T15:23:26,922 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/ca652fbe365941b0af64a11e8bb630a9, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/e0f2f6069206498c927883bd72b37254, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/79503c4e4df64e6181b1abf1f215be0f] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp, totalSize=39.3 K 2024-11-22T15:23:26,923 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting ca652fbe365941b0af64a11e8bb630a9, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1732289004735 2024-11-22T15:23:26,923 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting e0f2f6069206498c927883bd72b37254, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=390, earliestPutTs=1732289005413 2024-11-22T15:23:26,923 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 79503c4e4df64e6181b1abf1f215be0f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=408, earliestPutTs=1732289006079 
2024-11-22T15:23:26,926 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting cee4e75282c54468a195594b1f652892, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1732289004735 2024-11-22T15:23:26,927 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 759e9e1f7f1c4b38954bb236230020af, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=390, earliestPutTs=1732289005413 2024-11-22T15:23:26,928 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 65c1d46ea2ce43eb86131395e5631b84, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=408, earliestPutTs=1732289006079 2024-11-22T15:23:26,934 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): db373da2a391c371f2b4fcae935e7eac#A#compaction#311 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:26,935 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/9690962ac14b4078a03277fc1609b4ef is 50, key is test_row_0/A:col10/1732289006091/Put/seqid=0 2024-11-22T15:23:26,949 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): db373da2a391c371f2b4fcae935e7eac#B#compaction#312 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:26,949 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/45b1ba99d2334cecaa8c2d27ed69637c is 50, key is test_row_0/B:col10/1732289006091/Put/seqid=0 2024-11-22T15:23:26,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742181_1357 (size=13289) 2024-11-22T15:23:26,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742182_1358 (size=13289) 2024-11-22T15:23:26,990 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/45b1ba99d2334cecaa8c2d27ed69637c as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/45b1ba99d2334cecaa8c2d27ed69637c 2024-11-22T15:23:26,998 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in db373da2a391c371f2b4fcae935e7eac/B of db373da2a391c371f2b4fcae935e7eac into 45b1ba99d2334cecaa8c2d27ed69637c(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:23:26,998 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:26,998 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac., storeName=db373da2a391c371f2b4fcae935e7eac/B, priority=13, startTime=1732289006921; duration=0sec 2024-11-22T15:23:26,998 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:23:26,998 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: db373da2a391c371f2b4fcae935e7eac:B 2024-11-22T15:23:26,998 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:23:27,000 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:23:27,000 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): db373da2a391c371f2b4fcae935e7eac/C is initiating minor compaction (all files) 2024-11-22T15:23:27,000 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of db373da2a391c371f2b4fcae935e7eac/C in TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:27,000 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/b6b1424364364b2ab2312944d7d1924b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/98ff1d60b283469198f03c4106339dee, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/41cb42d030764e999b54f839d7866fc0] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp, totalSize=36.9 K 2024-11-22T15:23:27,009 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting b6b1424364364b2ab2312944d7d1924b, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1732289004735 2024-11-22T15:23:27,010 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 98ff1d60b283469198f03c4106339dee, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=390, earliestPutTs=1732289005413 2024-11-22T15:23:27,010 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 41cb42d030764e999b54f839d7866fc0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=408, earliestPutTs=1732289006079 2024-11-22T15:23:27,028 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
db373da2a391c371f2b4fcae935e7eac#C#compaction#313 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:27,029 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/73e9c6cd5d50448e9f0fdf14ef61ea42 is 50, key is test_row_0/C:col10/1732289006091/Put/seqid=0 2024-11-22T15:23:27,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742183_1359 (size=13289) 2024-11-22T15:23:27,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-22T15:23:27,074 INFO [Thread-1241 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 81 completed 2024-11-22T15:23:27,075 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:23:27,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees 2024-11-22T15:23:27,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-22T15:23:27,084 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:23:27,084 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:23:27,084 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:23:27,100 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing db373da2a391c371f2b4fcae935e7eac 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-22T15:23:27,100 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=A 2024-11-22T15:23:27,100 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:27,100 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=B 2024-11-22T15:23:27,100 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:27,100 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=C 2024-11-22T15:23:27,100 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:27,100 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on db373da2a391c371f2b4fcae935e7eac 2024-11-22T15:23:27,109 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/c3689eff9395421ba878a42e0bb2c3b5 is 50, key is test_row_0/A:col10/1732289006768/Put/seqid=0 2024-11-22T15:23:27,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742184_1360 (size=14741) 2024-11-22T15:23:27,124 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=429 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/c3689eff9395421ba878a42e0bb2c3b5 2024-11-22T15:23:27,133 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/29a15caaf616458ba6b12bfa74ecd0b1 is 50, key is test_row_0/B:col10/1732289006768/Put/seqid=0 2024-11-22T15:23:27,159 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:27,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 443 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289067157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:27,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742185_1361 (size=12301) 2024-11-22T15:23:27,170 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=429 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/29a15caaf616458ba6b12bfa74ecd0b1 2024-11-22T15:23:27,180 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/3502a9e228a348549e43e516e4095e6c is 50, key is test_row_0/C:col10/1732289006768/Put/seqid=0 2024-11-22T15:23:27,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-22T15:23:27,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742186_1362 (size=12301) 2024-11-22T15:23:27,236 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:27,236 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-22T15:23:27,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:27,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:27,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 
2024-11-22T15:23:27,237 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:27,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:27,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:27,262 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:27,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 445 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289067260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:27,375 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/9690962ac14b4078a03277fc1609b4ef as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/9690962ac14b4078a03277fc1609b4ef 2024-11-22T15:23:27,384 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in db373da2a391c371f2b4fcae935e7eac/A of db373da2a391c371f2b4fcae935e7eac into 9690962ac14b4078a03277fc1609b4ef(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:23:27,384 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:27,384 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac., storeName=db373da2a391c371f2b4fcae935e7eac/A, priority=13, startTime=1732289006921; duration=0sec 2024-11-22T15:23:27,384 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:27,384 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: db373da2a391c371f2b4fcae935e7eac:A 2024-11-22T15:23:27,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-22T15:23:27,388 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:27,389 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-22T15:23:27,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 
2024-11-22T15:23:27,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:27,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:27,389 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:27,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:23:27,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:27,466 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/73e9c6cd5d50448e9f0fdf14ef61ea42 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/73e9c6cd5d50448e9f0fdf14ef61ea42 2024-11-22T15:23:27,468 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:27,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 447 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289067464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:27,472 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in db373da2a391c371f2b4fcae935e7eac/C of db373da2a391c371f2b4fcae935e7eac into 73e9c6cd5d50448e9f0fdf14ef61ea42(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:23:27,472 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:27,472 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac., storeName=db373da2a391c371f2b4fcae935e7eac/C, priority=13, startTime=1732289006921; duration=0sec 2024-11-22T15:23:27,472 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:27,472 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: db373da2a391c371f2b4fcae935e7eac:C 2024-11-22T15:23:27,546 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:27,546 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-22T15:23:27,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 
2024-11-22T15:23:27,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:27,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:27,547 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:27,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:23:27,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:23:27,605 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=429 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/3502a9e228a348549e43e516e4095e6c 2024-11-22T15:23:27,609 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/c3689eff9395421ba878a42e0bb2c3b5 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/c3689eff9395421ba878a42e0bb2c3b5 2024-11-22T15:23:27,612 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/c3689eff9395421ba878a42e0bb2c3b5, entries=200, sequenceid=429, filesize=14.4 K 2024-11-22T15:23:27,613 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/29a15caaf616458ba6b12bfa74ecd0b1 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/29a15caaf616458ba6b12bfa74ecd0b1 2024-11-22T15:23:27,617 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/29a15caaf616458ba6b12bfa74ecd0b1, entries=150, sequenceid=429, filesize=12.0 K 2024-11-22T15:23:27,618 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/3502a9e228a348549e43e516e4095e6c as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/3502a9e228a348549e43e516e4095e6c 2024-11-22T15:23:27,622 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/3502a9e228a348549e43e516e4095e6c, entries=150, sequenceid=429, filesize=12.0 K 2024-11-22T15:23:27,623 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for db373da2a391c371f2b4fcae935e7eac in 523ms, sequenceid=429, compaction requested=false 2024-11-22T15:23:27,623 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:27,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-22T15:23:27,700 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:27,701 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-22T15:23:27,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:27,701 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2837): Flushing db373da2a391c371f2b4fcae935e7eac 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-22T15:23:27,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=A 2024-11-22T15:23:27,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:27,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=B 2024-11-22T15:23:27,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:27,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=C 2024-11-22T15:23:27,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:27,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/b59633e5feb04739afdaf6bc14868c46 is 50, key is test_row_0/A:col10/1732289007142/Put/seqid=0 2024-11-22T15:23:27,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742187_1363 (size=12301) 2024-11-22T15:23:27,747 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=447 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/b59633e5feb04739afdaf6bc14868c46 2024-11-22T15:23:27,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/c46a2e4146514f38a2618aa123a636e0 is 50, key is test_row_0/B:col10/1732289007142/Put/seqid=0 2024-11-22T15:23:27,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush 
requested on db373da2a391c371f2b4fcae935e7eac 2024-11-22T15:23:27,774 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:27,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742188_1364 (size=12301) 2024-11-22T15:23:27,844 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:27,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 466 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289067841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:27,947 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:27,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 468 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289067946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:28,152 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:28,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 470 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289068150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:28,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-22T15:23:28,197 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=447 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/c46a2e4146514f38a2618aa123a636e0 2024-11-22T15:23:28,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/deb56ce8f6764c4fb90e54422622c8d5 is 50, key is test_row_0/C:col10/1732289007142/Put/seqid=0 2024-11-22T15:23:28,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742189_1365 (size=12301) 2024-11-22T15:23:28,455 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:28,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 472 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289068453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:28,654 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=447 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/deb56ce8f6764c4fb90e54422622c8d5 2024-11-22T15:23:28,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/b59633e5feb04739afdaf6bc14868c46 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/b59633e5feb04739afdaf6bc14868c46 2024-11-22T15:23:28,666 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/b59633e5feb04739afdaf6bc14868c46, entries=150, sequenceid=447, filesize=12.0 K 2024-11-22T15:23:28,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/c46a2e4146514f38a2618aa123a636e0 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/c46a2e4146514f38a2618aa123a636e0 2024-11-22T15:23:28,677 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/c46a2e4146514f38a2618aa123a636e0, entries=150, sequenceid=447, filesize=12.0 K 2024-11-22T15:23:28,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/deb56ce8f6764c4fb90e54422622c8d5 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/deb56ce8f6764c4fb90e54422622c8d5 2024-11-22T15:23:28,682 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/deb56ce8f6764c4fb90e54422622c8d5, entries=150, sequenceid=447, filesize=12.0 K 2024-11-22T15:23:28,683 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for db373da2a391c371f2b4fcae935e7eac in 981ms, sequenceid=447, compaction requested=true 2024-11-22T15:23:28,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2538): Flush status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:28,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:28,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=84 2024-11-22T15:23:28,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=84 2024-11-22T15:23:28,694 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-11-22T15:23:28,694 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6020 sec 2024-11-22T15:23:28,696 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees in 1.6190 sec 2024-11-22T15:23:28,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on db373da2a391c371f2b4fcae935e7eac 2024-11-22T15:23:28,962 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing db373da2a391c371f2b4fcae935e7eac 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-22T15:23:28,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=A 2024-11-22T15:23:28,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:28,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=B 2024-11-22T15:23:28,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:28,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
db373da2a391c371f2b4fcae935e7eac, store=C 2024-11-22T15:23:28,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:28,968 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/ca6ae8e122f547039f348e3ad56b59da is 50, key is test_row_0/A:col10/1732289008961/Put/seqid=0 2024-11-22T15:23:29,007 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:29,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 487 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289069004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:29,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742190_1366 (size=14741) 2024-11-22T15:23:29,110 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:29,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 489 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289069108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:29,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-22T15:23:29,188 INFO [Thread-1241 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 83 completed 2024-11-22T15:23:29,190 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:23:29,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees 2024-11-22T15:23:29,191 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:23:29,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-22T15:23:29,192 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:23:29,192 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:23:29,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-22T15:23:29,315 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:29,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 491 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289069312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:29,344 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:29,344 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-22T15:23:29,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:29,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:29,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:29,345 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:23:29,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:29,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:29,413 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=468 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/ca6ae8e122f547039f348e3ad56b59da 2024-11-22T15:23:29,436 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/31a674939fbf4654add72cf5c7e1ac9d is 50, key is test_row_0/B:col10/1732289008961/Put/seqid=0 2024-11-22T15:23:29,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742191_1367 (size=12301) 2024-11-22T15:23:29,492 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=468 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/31a674939fbf4654add72cf5c7e1ac9d 2024-11-22T15:23:29,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-22T15:23:29,498 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:29,499 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-22T15:23:29,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:29,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:29,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:29,499 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:29,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:29,500 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/10e5ba468a044a17b8ce27dc0cef4fc3 is 50, key is test_row_0/C:col10/1732289008961/Put/seqid=0 2024-11-22T15:23:29,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:29,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742192_1368 (size=12301) 2024-11-22T15:23:29,524 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=468 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/10e5ba468a044a17b8ce27dc0cef4fc3 2024-11-22T15:23:29,530 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/ca6ae8e122f547039f348e3ad56b59da as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/ca6ae8e122f547039f348e3ad56b59da 2024-11-22T15:23:29,534 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/ca6ae8e122f547039f348e3ad56b59da, entries=200, sequenceid=468, filesize=14.4 K 2024-11-22T15:23:29,535 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/31a674939fbf4654add72cf5c7e1ac9d as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/31a674939fbf4654add72cf5c7e1ac9d 2024-11-22T15:23:29,538 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/31a674939fbf4654add72cf5c7e1ac9d, entries=150, sequenceid=468, filesize=12.0 K 2024-11-22T15:23:29,539 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/10e5ba468a044a17b8ce27dc0cef4fc3 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/10e5ba468a044a17b8ce27dc0cef4fc3 2024-11-22T15:23:29,543 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/10e5ba468a044a17b8ce27dc0cef4fc3, entries=150, sequenceid=468, filesize=12.0 K 2024-11-22T15:23:29,545 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for db373da2a391c371f2b4fcae935e7eac in 582ms, sequenceid=468, compaction requested=true 2024-11-22T15:23:29,545 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:29,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store db373da2a391c371f2b4fcae935e7eac:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:23:29,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:23:29,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store db373da2a391c371f2b4fcae935e7eac:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:23:29,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-22T15:23:29,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store db373da2a391c371f2b4fcae935e7eac:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:23:29,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-22T15:23:29,545 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T15:23:29,546 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T15:23:29,546 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 55072 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T15:23:29,546 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): db373da2a391c371f2b4fcae935e7eac/A is initiating minor 
compaction (all files) 2024-11-22T15:23:29,546 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of db373da2a391c371f2b4fcae935e7eac/A in TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:29,547 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/9690962ac14b4078a03277fc1609b4ef, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/c3689eff9395421ba878a42e0bb2c3b5, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/b59633e5feb04739afdaf6bc14868c46, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/ca6ae8e122f547039f348e3ad56b59da] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp, totalSize=53.8 K 2024-11-22T15:23:29,547 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 9690962ac14b4078a03277fc1609b4ef, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=408, earliestPutTs=1732289006079 2024-11-22T15:23:29,547 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting c3689eff9395421ba878a42e0bb2c3b5, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=429, earliestPutTs=1732289006739 2024-11-22T15:23:29,548 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50192 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T15:23:29,548 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): db373da2a391c371f2b4fcae935e7eac/C is initiating minor compaction (all files) 2024-11-22T15:23:29,548 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of db373da2a391c371f2b4fcae935e7eac/C in TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 
2024-11-22T15:23:29,548 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/73e9c6cd5d50448e9f0fdf14ef61ea42, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/3502a9e228a348549e43e516e4095e6c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/deb56ce8f6764c4fb90e54422622c8d5, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/10e5ba468a044a17b8ce27dc0cef4fc3] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp, totalSize=49.0 K 2024-11-22T15:23:29,548 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting b59633e5feb04739afdaf6bc14868c46, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=447, earliestPutTs=1732289007131 2024-11-22T15:23:29,549 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 73e9c6cd5d50448e9f0fdf14ef61ea42, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=408, earliestPutTs=1732289006079 2024-11-22T15:23:29,549 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting ca6ae8e122f547039f348e3ad56b59da, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=468, earliestPutTs=1732289007830 2024-11-22T15:23:29,549 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3502a9e228a348549e43e516e4095e6c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=429, earliestPutTs=1732289006768 2024-11-22T15:23:29,549 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting deb56ce8f6764c4fb90e54422622c8d5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=447, earliestPutTs=1732289007131 2024-11-22T15:23:29,550 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 10e5ba468a044a17b8ce27dc0cef4fc3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=468, earliestPutTs=1732289007835 2024-11-22T15:23:29,573 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): db373da2a391c371f2b4fcae935e7eac#A#compaction#323 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:29,573 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/75846b749bb54b948fa7b262d5b95eae is 50, key is test_row_0/A:col10/1732289008961/Put/seqid=0 2024-11-22T15:23:29,580 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): db373da2a391c371f2b4fcae935e7eac#C#compaction#324 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:29,581 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/d83dc082887244b09159828c86b716b1 is 50, key is test_row_0/C:col10/1732289008961/Put/seqid=0 2024-11-22T15:23:29,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on db373da2a391c371f2b4fcae935e7eac 2024-11-22T15:23:29,622 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing db373da2a391c371f2b4fcae935e7eac 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-22T15:23:29,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=A 2024-11-22T15:23:29,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:29,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=B 2024-11-22T15:23:29,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:29,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=C 2024-11-22T15:23:29,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:29,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742194_1370 (size=13425) 2024-11-22T15:23:29,637 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/d83dc082887244b09159828c86b716b1 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/d83dc082887244b09159828c86b716b1 2024-11-22T15:23:29,643 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in db373da2a391c371f2b4fcae935e7eac/C of db373da2a391c371f2b4fcae935e7eac into d83dc082887244b09159828c86b716b1(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:23:29,643 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:29,643 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac., storeName=db373da2a391c371f2b4fcae935e7eac/C, priority=12, startTime=1732289009545; duration=0sec 2024-11-22T15:23:29,643 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:23:29,643 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: db373da2a391c371f2b4fcae935e7eac:C 2024-11-22T15:23:29,643 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T15:23:29,643 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/32a4291981864b029f9ea677050a1708 is 50, key is test_row_0/A:col10/1732289008992/Put/seqid=0 2024-11-22T15:23:29,645 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50192 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T15:23:29,645 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): db373da2a391c371f2b4fcae935e7eac/B is initiating minor compaction (all files) 2024-11-22T15:23:29,645 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of db373da2a391c371f2b4fcae935e7eac/B in TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 
2024-11-22T15:23:29,645 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/45b1ba99d2334cecaa8c2d27ed69637c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/29a15caaf616458ba6b12bfa74ecd0b1, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/c46a2e4146514f38a2618aa123a636e0, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/31a674939fbf4654add72cf5c7e1ac9d] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp, totalSize=49.0 K 2024-11-22T15:23:29,646 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 45b1ba99d2334cecaa8c2d27ed69637c, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=408, earliestPutTs=1732289006079 2024-11-22T15:23:29,646 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 29a15caaf616458ba6b12bfa74ecd0b1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=429, earliestPutTs=1732289006768 2024-11-22T15:23:29,647 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting c46a2e4146514f38a2618aa123a636e0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=447, earliestPutTs=1732289007131 2024-11-22T15:23:29,647 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 31a674939fbf4654add72cf5c7e1ac9d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=468, earliestPutTs=1732289007835 2024-11-22T15:23:29,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742193_1369 (size=13425) 2024-11-22T15:23:29,651 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:29,652 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-22T15:23:29,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:29,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:29,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 
2024-11-22T15:23:29,652 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:29,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:29,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:29,655 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/75846b749bb54b948fa7b262d5b95eae as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/75846b749bb54b948fa7b262d5b95eae 2024-11-22T15:23:29,664 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in db373da2a391c371f2b4fcae935e7eac/A of db373da2a391c371f2b4fcae935e7eac into 75846b749bb54b948fa7b262d5b95eae(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:23:29,664 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:29,664 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac., storeName=db373da2a391c371f2b4fcae935e7eac/A, priority=12, startTime=1732289009545; duration=0sec 2024-11-22T15:23:29,664 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:29,664 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: db373da2a391c371f2b4fcae935e7eac:A 2024-11-22T15:23:29,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742195_1371 (size=14741) 2024-11-22T15:23:29,698 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): db373da2a391c371f2b4fcae935e7eac#B#compaction#326 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:29,699 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/449fbb103bab477eb496347ad3842a62 is 50, key is test_row_0/B:col10/1732289008961/Put/seqid=0 2024-11-22T15:23:29,735 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:29,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 511 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289069732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:29,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742196_1372 (size=13425) 2024-11-22T15:23:29,745 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/449fbb103bab477eb496347ad3842a62 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/449fbb103bab477eb496347ad3842a62 2024-11-22T15:23:29,750 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in db373da2a391c371f2b4fcae935e7eac/B of db373da2a391c371f2b4fcae935e7eac into 449fbb103bab477eb496347ad3842a62(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:23:29,751 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:29,751 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac., storeName=db373da2a391c371f2b4fcae935e7eac/B, priority=12, startTime=1732289009545; duration=0sec 2024-11-22T15:23:29,751 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:29,751 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: db373da2a391c371f2b4fcae935e7eac:B 2024-11-22T15:23:29,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-22T15:23:29,804 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:29,805 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-22T15:23:29,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:29,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:29,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:29,805 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:23:29,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:29,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:29,838 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:29,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 513 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289069836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:29,957 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:29,957 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-22T15:23:29,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:29,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:29,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 
2024-11-22T15:23:29,958 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:29,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:29,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:30,042 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:30,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 515 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289070040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:30,092 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=484 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/32a4291981864b029f9ea677050a1708 2024-11-22T15:23:30,098 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/1bb3d4ba852047b096b86b3786474c1d is 50, key is test_row_0/B:col10/1732289008992/Put/seqid=0 2024-11-22T15:23:30,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742197_1373 (size=12301) 2024-11-22T15:23:30,105 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=484 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/1bb3d4ba852047b096b86b3786474c1d 2024-11-22T15:23:30,110 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:30,111 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-22T15:23:30,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:30,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:30,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 
2024-11-22T15:23:30,111 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:30,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:30,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:30,120 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/2878e838a56148c98a714250c6d78bbc is 50, key is test_row_0/C:col10/1732289008992/Put/seqid=0 2024-11-22T15:23:30,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742198_1374 (size=12301) 2024-11-22T15:23:30,154 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=484 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/2878e838a56148c98a714250c6d78bbc 2024-11-22T15:23:30,157 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/32a4291981864b029f9ea677050a1708 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/32a4291981864b029f9ea677050a1708 2024-11-22T15:23:30,163 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/32a4291981864b029f9ea677050a1708, entries=200, sequenceid=484, filesize=14.4 K 2024-11-22T15:23:30,164 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/1bb3d4ba852047b096b86b3786474c1d as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/1bb3d4ba852047b096b86b3786474c1d 2024-11-22T15:23:30,167 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/1bb3d4ba852047b096b86b3786474c1d, entries=150, sequenceid=484, filesize=12.0 K 2024-11-22T15:23:30,168 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/2878e838a56148c98a714250c6d78bbc as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/2878e838a56148c98a714250c6d78bbc 2024-11-22T15:23:30,173 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/2878e838a56148c98a714250c6d78bbc, entries=150, sequenceid=484, filesize=12.0 K 2024-11-22T15:23:30,174 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for db373da2a391c371f2b4fcae935e7eac in 552ms, sequenceid=484, compaction requested=false 2024-11-22T15:23:30,174 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:30,264 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:30,265 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-22T15:23:30,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 
2024-11-22T15:23:30,265 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2837): Flushing db373da2a391c371f2b4fcae935e7eac 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-22T15:23:30,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=A 2024-11-22T15:23:30,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:30,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=B 2024-11-22T15:23:30,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:30,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=C 2024-11-22T15:23:30,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:30,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/d62b26b8f7654876ac3aba544c48a070 is 50, key is test_row_0/A:col10/1732289009731/Put/seqid=0 2024-11-22T15:23:30,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742199_1375 (size=12301) 2024-11-22T15:23:30,294 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=507 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/d62b26b8f7654876ac3aba544c48a070 2024-11-22T15:23:30,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-22T15:23:30,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/51c7736cd0694b2bbae45a7fcf554dc4 is 50, key is test_row_0/B:col10/1732289009731/Put/seqid=0 2024-11-22T15:23:30,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on db373da2a391c371f2b4fcae935e7eac 2024-11-22T15:23:30,346 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 
as already flushing 2024-11-22T15:23:30,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742200_1376 (size=12301) 2024-11-22T15:23:30,357 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=507 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/51c7736cd0694b2bbae45a7fcf554dc4 2024-11-22T15:23:30,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/c9b1ae0fdf8f434bab100f99519e8f93 is 50, key is test_row_0/C:col10/1732289009731/Put/seqid=0 2024-11-22T15:23:30,387 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:30,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 530 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289070385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:30,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742201_1377 (size=12301) 2024-11-22T15:23:30,398 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=507 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/c9b1ae0fdf8f434bab100f99519e8f93 2024-11-22T15:23:30,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/d62b26b8f7654876ac3aba544c48a070 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/d62b26b8f7654876ac3aba544c48a070 2024-11-22T15:23:30,423 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/d62b26b8f7654876ac3aba544c48a070, entries=150, sequenceid=507, filesize=12.0 K 2024-11-22T15:23:30,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/51c7736cd0694b2bbae45a7fcf554dc4 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/51c7736cd0694b2bbae45a7fcf554dc4 2024-11-22T15:23:30,434 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/51c7736cd0694b2bbae45a7fcf554dc4, entries=150, sequenceid=507, filesize=12.0 K 2024-11-22T15:23:30,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, 
pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/c9b1ae0fdf8f434bab100f99519e8f93 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/c9b1ae0fdf8f434bab100f99519e8f93 2024-11-22T15:23:30,439 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/c9b1ae0fdf8f434bab100f99519e8f93, entries=150, sequenceid=507, filesize=12.0 K 2024-11-22T15:23:30,440 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for db373da2a391c371f2b4fcae935e7eac in 175ms, sequenceid=507, compaction requested=true 2024-11-22T15:23:30,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2538): Flush status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:30,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:30,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=86 2024-11-22T15:23:30,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=86 2024-11-22T15:23:30,442 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=86, resume processing ppid=85 2024-11-22T15:23:30,443 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, ppid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2490 sec 2024-11-22T15:23:30,444 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees in 1.2530 sec 2024-11-22T15:23:30,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on db373da2a391c371f2b4fcae935e7eac 2024-11-22T15:23:30,492 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing db373da2a391c371f2b4fcae935e7eac 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-22T15:23:30,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=A 2024-11-22T15:23:30,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:30,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=B 2024-11-22T15:23:30,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:30,493 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=C 2024-11-22T15:23:30,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:30,514 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/286aff11ab5a41ca8f134cec8f2d2d2a is 50, key is test_row_0/A:col10/1732289010383/Put/seqid=0 2024-11-22T15:23:30,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742202_1378 (size=14741) 2024-11-22T15:23:30,544 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=524 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/286aff11ab5a41ca8f134cec8f2d2d2a 2024-11-22T15:23:30,556 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/734917f9d32d428b8b1c8f74c2eb43d5 is 50, key is test_row_0/B:col10/1732289010383/Put/seqid=0 2024-11-22T15:23:30,561 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:30,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 549 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289070558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:30,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742203_1379 (size=12301) 2024-11-22T15:23:30,578 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=524 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/734917f9d32d428b8b1c8f74c2eb43d5 2024-11-22T15:23:30,589 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/a605cdce1ca54d96a66e4f44a89674b3 is 50, key is test_row_0/C:col10/1732289010383/Put/seqid=0 2024-11-22T15:23:30,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742204_1380 (size=12301) 2024-11-22T15:23:30,609 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=524 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/a605cdce1ca54d96a66e4f44a89674b3 2024-11-22T15:23:30,616 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/286aff11ab5a41ca8f134cec8f2d2d2a as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/286aff11ab5a41ca8f134cec8f2d2d2a 2024-11-22T15:23:30,621 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/286aff11ab5a41ca8f134cec8f2d2d2a, entries=200, sequenceid=524, filesize=14.4 K 2024-11-22T15:23:30,623 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/734917f9d32d428b8b1c8f74c2eb43d5 as 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/734917f9d32d428b8b1c8f74c2eb43d5 2024-11-22T15:23:30,629 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/734917f9d32d428b8b1c8f74c2eb43d5, entries=150, sequenceid=524, filesize=12.0 K 2024-11-22T15:23:30,631 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/a605cdce1ca54d96a66e4f44a89674b3 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/a605cdce1ca54d96a66e4f44a89674b3 2024-11-22T15:23:30,636 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/a605cdce1ca54d96a66e4f44a89674b3, entries=150, sequenceid=524, filesize=12.0 K 2024-11-22T15:23:30,637 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for db373da2a391c371f2b4fcae935e7eac in 144ms, sequenceid=524, compaction requested=true 2024-11-22T15:23:30,637 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:30,637 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store db373da2a391c371f2b4fcae935e7eac:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:23:30,637 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:30,637 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store db373da2a391c371f2b4fcae935e7eac:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:23:30,637 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:30,637 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T15:23:30,637 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store db373da2a391c371f2b4fcae935e7eac:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:23:30,637 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:23:30,637 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T15:23:30,638 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction 
algorithm has selected 4 files of size 55208 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T15:23:30,638 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): db373da2a391c371f2b4fcae935e7eac/A is initiating minor compaction (all files) 2024-11-22T15:23:30,638 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50328 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T15:23:30,638 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): db373da2a391c371f2b4fcae935e7eac/B is initiating minor compaction (all files) 2024-11-22T15:23:30,638 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of db373da2a391c371f2b4fcae935e7eac/A in TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:30,638 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of db373da2a391c371f2b4fcae935e7eac/B in TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:30,638 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/75846b749bb54b948fa7b262d5b95eae, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/32a4291981864b029f9ea677050a1708, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/d62b26b8f7654876ac3aba544c48a070, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/286aff11ab5a41ca8f134cec8f2d2d2a] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp, totalSize=53.9 K 2024-11-22T15:23:30,638 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/449fbb103bab477eb496347ad3842a62, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/1bb3d4ba852047b096b86b3786474c1d, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/51c7736cd0694b2bbae45a7fcf554dc4, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/734917f9d32d428b8b1c8f74c2eb43d5] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp, totalSize=49.1 K 2024-11-22T15:23:30,638 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 75846b749bb54b948fa7b262d5b95eae, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=468, 
earliestPutTs=1732289007835 2024-11-22T15:23:30,638 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 449fbb103bab477eb496347ad3842a62, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=468, earliestPutTs=1732289007835 2024-11-22T15:23:30,639 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 32a4291981864b029f9ea677050a1708, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=484, earliestPutTs=1732289008992 2024-11-22T15:23:30,639 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 1bb3d4ba852047b096b86b3786474c1d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=484, earliestPutTs=1732289008992 2024-11-22T15:23:30,639 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 51c7736cd0694b2bbae45a7fcf554dc4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=507, earliestPutTs=1732289009722 2024-11-22T15:23:30,639 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting d62b26b8f7654876ac3aba544c48a070, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=507, earliestPutTs=1732289009722 2024-11-22T15:23:30,639 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 286aff11ab5a41ca8f134cec8f2d2d2a, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=524, earliestPutTs=1732289010374 2024-11-22T15:23:30,639 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 734917f9d32d428b8b1c8f74c2eb43d5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=524, earliestPutTs=1732289010374 2024-11-22T15:23:30,665 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): db373da2a391c371f2b4fcae935e7eac#A#compaction#335 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:30,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on db373da2a391c371f2b4fcae935e7eac 2024-11-22T15:23:30,665 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing db373da2a391c371f2b4fcae935e7eac 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-22T15:23:30,665 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/0ff725e39e71430e8307b6496abdae0c is 50, key is test_row_0/A:col10/1732289010383/Put/seqid=0 2024-11-22T15:23:30,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=A 2024-11-22T15:23:30,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:30,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=B 2024-11-22T15:23:30,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:30,666 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=C 2024-11-22T15:23:30,666 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:30,685 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): db373da2a391c371f2b4fcae935e7eac#B#compaction#336 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:30,686 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/04035a5224814b03b5c9327478518fc2 is 50, key is test_row_0/B:col10/1732289010383/Put/seqid=0 2024-11-22T15:23:30,717 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/4f8190842f924a56944138efd2d8c724 is 50, key is test_row_0/A:col10/1732289010549/Put/seqid=0 2024-11-22T15:23:30,738 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:30,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 565 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289070735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:30,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742205_1381 (size=13561) 2024-11-22T15:23:30,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742206_1382 (size=13561) 2024-11-22T15:23:30,755 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/04035a5224814b03b5c9327478518fc2 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/04035a5224814b03b5c9327478518fc2 2024-11-22T15:23:30,760 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in db373da2a391c371f2b4fcae935e7eac/B of db373da2a391c371f2b4fcae935e7eac into 04035a5224814b03b5c9327478518fc2(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:23:30,760 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:30,760 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac., storeName=db373da2a391c371f2b4fcae935e7eac/B, priority=12, startTime=1732289010637; duration=0sec 2024-11-22T15:23:30,760 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:23:30,760 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: db373da2a391c371f2b4fcae935e7eac:B 2024-11-22T15:23:30,760 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T15:23:30,762 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50328 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T15:23:30,762 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): db373da2a391c371f2b4fcae935e7eac/C is initiating minor compaction (all files) 2024-11-22T15:23:30,762 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of db373da2a391c371f2b4fcae935e7eac/C in TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:30,763 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/d83dc082887244b09159828c86b716b1, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/2878e838a56148c98a714250c6d78bbc, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/c9b1ae0fdf8f434bab100f99519e8f93, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/a605cdce1ca54d96a66e4f44a89674b3] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp, totalSize=49.1 K 2024-11-22T15:23:30,764 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting d83dc082887244b09159828c86b716b1, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=468, earliestPutTs=1732289007835 2024-11-22T15:23:30,764 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 2878e838a56148c98a714250c6d78bbc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=484, earliestPutTs=1732289008992 2024-11-22T15:23:30,764 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting c9b1ae0fdf8f434bab100f99519e8f93, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=507, earliestPutTs=1732289009722 2024-11-22T15:23:30,766 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting a605cdce1ca54d96a66e4f44a89674b3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=524, earliestPutTs=1732289010374 2024-11-22T15:23:30,781 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): db373da2a391c371f2b4fcae935e7eac#C#compaction#338 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:30,781 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/d70530754ef640698adeb59cad824584 is 50, key is test_row_0/C:col10/1732289010383/Put/seqid=0 2024-11-22T15:23:30,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742207_1383 (size=14741) 2024-11-22T15:23:30,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742208_1384 (size=13561) 2024-11-22T15:23:30,826 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/d70530754ef640698adeb59cad824584 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/d70530754ef640698adeb59cad824584 2024-11-22T15:23:30,832 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in db373da2a391c371f2b4fcae935e7eac/C of db373da2a391c371f2b4fcae935e7eac into d70530754ef640698adeb59cad824584(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:23:30,832 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:30,833 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac., storeName=db373da2a391c371f2b4fcae935e7eac/C, priority=12, startTime=1732289010637; duration=0sec 2024-11-22T15:23:30,833 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:30,833 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: db373da2a391c371f2b4fcae935e7eac:C 2024-11-22T15:23:30,842 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:30,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 567 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289070839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:31,045 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:31,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 569 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289071044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:31,144 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/0ff725e39e71430e8307b6496abdae0c as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/0ff725e39e71430e8307b6496abdae0c 2024-11-22T15:23:31,144 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:31,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46934 deadline: 1732289071144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:31,145 DEBUG [Thread-1237 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18259 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac., hostname=77927f992d0b,36033,1732288915809, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T15:23:31,147 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in db373da2a391c371f2b4fcae935e7eac/A of db373da2a391c371f2b4fcae935e7eac into 0ff725e39e71430e8307b6496abdae0c(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:23:31,147 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:31,147 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac., storeName=db373da2a391c371f2b4fcae935e7eac/A, priority=12, startTime=1732289010637; duration=0sec 2024-11-22T15:23:31,147 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:31,147 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: db373da2a391c371f2b4fcae935e7eac:A 2024-11-22T15:23:31,150 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:31,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46910 deadline: 1732289071148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:31,151 DEBUG [Thread-1239 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18260 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac., hostname=77927f992d0b,36033,1732288915809, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T15:23:31,172 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:31,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46900 deadline: 1732289071171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:31,172 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:31,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46916 deadline: 1732289071171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:31,172 DEBUG [Thread-1235 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18287 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac., hostname=77927f992d0b,36033,1732288915809, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T15:23:31,173 DEBUG [Thread-1231 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18281 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac., hostname=77927f992d0b,36033,1732288915809, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T15:23:31,183 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=544 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/4f8190842f924a56944138efd2d8c724 2024-11-22T15:23:31,189 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/d00fdb6e093745538a400a8f35efa704 is 50, key is test_row_0/B:col10/1732289010549/Put/seqid=0 2024-11-22T15:23:31,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742209_1385 (size=12301) 2024-11-22T15:23:31,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-22T15:23:31,298 INFO [Thread-1241 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 85 completed 2024-11-22T15:23:31,299 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:23:31,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees 2024-11-22T15:23:31,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-22T15:23:31,300 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:23:31,300 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute 
state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:23:31,300 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:23:31,347 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:31,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 571 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289071346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:31,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-22T15:23:31,451 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:31,452 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-22T15:23:31,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:31,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:31,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 
2024-11-22T15:23:31,452 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:31,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:31,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:31,595 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=544 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/d00fdb6e093745538a400a8f35efa704 2024-11-22T15:23:31,600 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/578305f810e74cce9e7956371ef1f5b5 is 50, key is test_row_0/C:col10/1732289010549/Put/seqid=0 2024-11-22T15:23:31,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-22T15:23:31,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742210_1386 (size=12301) 2024-11-22T15:23:31,603 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:31,604 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-22T15:23:31,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:31,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 
as already flushing 2024-11-22T15:23:31,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:31,604 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:31,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:31,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:31,755 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:31,756 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-22T15:23:31,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:31,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:31,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:31,756 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:31,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:31,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:31,851 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:31,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 573 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46886 deadline: 1732289071849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:31,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-22T15:23:31,908 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:31,908 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-22T15:23:31,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:31,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. as already flushing 2024-11-22T15:23:31,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:31,909 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
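The entries above show client puts being rejected with RegionTooBusyException once the region's memstore exceeds its 512 K test limit, while the master keeps re-dispatching the flush procedure (pid=88) until the in-progress flush completes. A minimal client-side sketch of backing off and retrying a put when that exception surfaces is below; the table name is taken from the log, but the attempt budget and backoff values are assumptions, and the exception may reach the caller wrapped by the client's own retry layer rather than directly.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100L;                            // assumed initial backoff
      for (int attempt = 1; attempt <= 5; attempt++) {  // assumed attempt budget
        try {
          table.put(put);                               // may fail while the memstore is over its limit
          return;                                       // write accepted
        } catch (IOException e) {                       // RegionTooBusyException is an IOException
          if (attempt == 5) throw e;                    // give up after the last attempt
          Thread.sleep(backoffMs);                      // wait for the flush to drain the memstore
          backoffMs *= 2;                               // exponential backoff before retrying
        }
      }
    }
  }
}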
2024-11-22T15:23:31,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:31,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:32,003 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=544 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/578305f810e74cce9e7956371ef1f5b5 2024-11-22T15:23:32,007 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/4f8190842f924a56944138efd2d8c724 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/4f8190842f924a56944138efd2d8c724 2024-11-22T15:23:32,010 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/4f8190842f924a56944138efd2d8c724, entries=200, sequenceid=544, filesize=14.4 K 2024-11-22T15:23:32,011 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/d00fdb6e093745538a400a8f35efa704 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/d00fdb6e093745538a400a8f35efa704 2024-11-22T15:23:32,014 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/d00fdb6e093745538a400a8f35efa704, entries=150, sequenceid=544, filesize=12.0 K 2024-11-22T15:23:32,014 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/578305f810e74cce9e7956371ef1f5b5 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/578305f810e74cce9e7956371ef1f5b5 2024-11-22T15:23:32,017 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/578305f810e74cce9e7956371ef1f5b5, entries=150, sequenceid=544, filesize=12.0 K 2024-11-22T15:23:32,018 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for db373da2a391c371f2b4fcae935e7eac in 1353ms, sequenceid=544, compaction requested=false 2024-11-22T15:23:32,018 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:32,060 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 
77927f992d0b,36033,1732288915809 2024-11-22T15:23:32,060 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-22T15:23:32,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:32,061 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2837): Flushing db373da2a391c371f2b4fcae935e7eac 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-22T15:23:32,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=A 2024-11-22T15:23:32,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:32,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=B 2024-11-22T15:23:32,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:32,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=C 2024-11-22T15:23:32,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:32,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/ae81afbd112e4032a86d1f86225e289a is 50, key is test_row_0/A:col10/1732289010727/Put/seqid=0 2024-11-22T15:23:32,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742211_1387 (size=12301) 2024-11-22T15:23:32,075 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=563 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/ae81afbd112e4032a86d1f86225e289a 2024-11-22T15:23:32,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/055bccfb9c4b4c819e5729137efd7ced is 50, key is test_row_0/B:col10/1732289010727/Put/seqid=0 2024-11-22T15:23:32,086 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742212_1388 (size=12301) 2024-11-22T15:23:32,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-22T15:23:32,487 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=563 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/055bccfb9c4b4c819e5729137efd7ced 2024-11-22T15:23:32,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/0631318acff64c83b023ae4cda53324e is 50, key is test_row_0/C:col10/1732289010727/Put/seqid=0 2024-11-22T15:23:32,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742213_1389 (size=12301) 2024-11-22T15:23:32,549 DEBUG [Thread-1250 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x605827c9 to 127.0.0.1:52970 2024-11-22T15:23:32,549 DEBUG [Thread-1250 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:23:32,549 DEBUG [Thread-1244 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x72537a47 to 127.0.0.1:52970 2024-11-22T15:23:32,549 DEBUG [Thread-1244 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:23:32,550 DEBUG [Thread-1248 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c299cfb to 127.0.0.1:52970 2024-11-22T15:23:32,550 DEBUG [Thread-1248 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:23:32,550 DEBUG [Thread-1246 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x036642cb to 127.0.0.1:52970 2024-11-22T15:23:32,550 DEBUG [Thread-1246 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:23:32,550 DEBUG [Thread-1242 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x14ed1e44 to 127.0.0.1:52970 2024-11-22T15:23:32,551 DEBUG [Thread-1242 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:23:32,864 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 
as already flushing 2024-11-22T15:23:32,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on db373da2a391c371f2b4fcae935e7eac 2024-11-22T15:23:32,865 DEBUG [Thread-1233 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x028e73c0 to 127.0.0.1:52970 2024-11-22T15:23:32,865 DEBUG [Thread-1233 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:23:32,922 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=563 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/0631318acff64c83b023ae4cda53324e 2024-11-22T15:23:32,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/ae81afbd112e4032a86d1f86225e289a as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/ae81afbd112e4032a86d1f86225e289a 2024-11-22T15:23:32,936 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/ae81afbd112e4032a86d1f86225e289a, entries=150, sequenceid=563, filesize=12.0 K 2024-11-22T15:23:32,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/055bccfb9c4b4c819e5729137efd7ced as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/055bccfb9c4b4c819e5729137efd7ced 2024-11-22T15:23:32,940 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/055bccfb9c4b4c819e5729137efd7ced, entries=150, sequenceid=563, filesize=12.0 K 2024-11-22T15:23:32,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/0631318acff64c83b023ae4cda53324e as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/0631318acff64c83b023ae4cda53324e 2024-11-22T15:23:32,944 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/0631318acff64c83b023ae4cda53324e, entries=150, sequenceid=563, filesize=12.0 K 2024-11-22T15:23:32,945 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=6.71 KB/6870 for db373da2a391c371f2b4fcae935e7eac in 884ms, sequenceid=563, compaction requested=true 2024-11-22T15:23:32,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2538): Flush status journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:32,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:32,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=88 2024-11-22T15:23:32,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=88 2024-11-22T15:23:32,947 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=87 2024-11-22T15:23:32,947 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6460 sec 2024-11-22T15:23:32,949 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees in 1.6490 sec 2024-11-22T15:23:33,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-22T15:23:33,405 INFO [Thread-1241 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 87 completed 2024-11-22T15:23:41,209 DEBUG [Thread-1239 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2e9ae050 to 127.0.0.1:52970 2024-11-22T15:23:41,209 DEBUG [Thread-1239 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:23:41,211 DEBUG [Thread-1237 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x34cb3991 to 127.0.0.1:52970 2024-11-22T15:23:41,211 DEBUG [Thread-1237 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:23:41,217 DEBUG [Thread-1235 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7c480dfb to 127.0.0.1:52970 2024-11-22T15:23:41,217 DEBUG [Thread-1235 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:23:41,250 DEBUG [Thread-1231 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c1ac389 to 127.0.0.1:52970 2024-11-22T15:23:41,250 DEBUG [Thread-1231 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:23:41,251 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-22T15:23:41,251 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 5 2024-11-22T15:23:41,251 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 414 2024-11-22T15:23:41,251 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 12 2024-11-22T15:23:41,251 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 4 2024-11-22T15:23:41,251 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 12 2024-11-22T15:23:41,251 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-22T15:23:41,251 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4442 2024-11-22T15:23:41,251 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4300 2024-11-22T15:23:41,251 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4158 2024-11-22T15:23:41,251 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4400 2024-11-22T15:23:41,252 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4230 2024-11-22T15:23:41,252 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-22T15:23:41,252 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-22T15:23:41,252 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7f66057f to 127.0.0.1:52970 2024-11-22T15:23:41,252 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:23:41,253 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-22T15:23:41,254 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-22T15:23:41,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-22T15:23:41,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-22T15:23:41,259 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732289021258"}]},"ts":"1732289021258"} 2024-11-22T15:23:41,260 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-22T15:23:41,276 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-22T15:23:41,276 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-22T15:23:41,278 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=db373da2a391c371f2b4fcae935e7eac, UNASSIGN}] 2024-11-22T15:23:41,278 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=db373da2a391c371f2b4fcae935e7eac, UNASSIGN 2024-11-22T15:23:41,279 INFO [PEWorker-5 {}] 
assignment.RegionStateStore(202): pid=91 updating hbase:meta row=db373da2a391c371f2b4fcae935e7eac, regionState=CLOSING, regionLocation=77927f992d0b,36033,1732288915809 2024-11-22T15:23:41,280 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-22T15:23:41,280 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=92, ppid=91, state=RUNNABLE; CloseRegionProcedure db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809}] 2024-11-22T15:23:41,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-22T15:23:41,432 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:41,432 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(124): Close db373da2a391c371f2b4fcae935e7eac 2024-11-22T15:23:41,432 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-22T15:23:41,432 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1681): Closing db373da2a391c371f2b4fcae935e7eac, disabling compactions & flushes 2024-11-22T15:23:41,432 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:41,432 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:41,432 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. after waiting 0 ms 2024-11-22T15:23:41,432 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 
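For reference, the procedures recorded in this stretch of the log (the FLUSH operation that completed as procId 87, and the DisableTableProcedure/unassign/close sequence running as pids 89-92) correspond to ordinary Admin calls. A hedged sketch follows; the table name is taken from the log, while configuration and timing are left to defaults and are not part of the test output.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndDisableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tn = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.flush(tn);          // drives the table/region flush procedure seen above
      admin.disableTable(tn);   // drives the unassign and close of the region, as logged
      System.out.println("disabled=" + admin.isTableDisabled(tn));
    }
  }
}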
2024-11-22T15:23:41,432 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(2837): Flushing db373da2a391c371f2b4fcae935e7eac 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-22T15:23:41,433 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=A 2024-11-22T15:23:41,433 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:41,433 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=B 2024-11-22T15:23:41,433 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:41,433 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK db373da2a391c371f2b4fcae935e7eac, store=C 2024-11-22T15:23:41,433 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:41,437 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/654ede07ddd24de288074224dd8df625 is 50, key is test_row_0/A:col10/1732289021248/Put/seqid=0 2024-11-22T15:23:41,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742214_1390 (size=9857) 2024-11-22T15:23:41,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-22T15:23:41,843 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=571 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/654ede07ddd24de288074224dd8df625 2024-11-22T15:23:41,855 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/1b72e9fc36924123ae7c409445f91db2 is 50, key is test_row_0/B:col10/1732289021248/Put/seqid=0 2024-11-22T15:23:41,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742215_1391 (size=9857) 2024-11-22T15:23:41,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-22T15:23:42,258 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 
{event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=571 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/1b72e9fc36924123ae7c409445f91db2 2024-11-22T15:23:42,263 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/aed19008085f421783f71e2c066a0bd6 is 50, key is test_row_0/C:col10/1732289021248/Put/seqid=0 2024-11-22T15:23:42,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742216_1392 (size=9857) 2024-11-22T15:23:42,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-22T15:23:42,667 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=571 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/aed19008085f421783f71e2c066a0bd6 2024-11-22T15:23:42,674 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/A/654ede07ddd24de288074224dd8df625 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/654ede07ddd24de288074224dd8df625 2024-11-22T15:23:42,679 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/654ede07ddd24de288074224dd8df625, entries=100, sequenceid=571, filesize=9.6 K 2024-11-22T15:23:42,680 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/B/1b72e9fc36924123ae7c409445f91db2 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/1b72e9fc36924123ae7c409445f91db2 2024-11-22T15:23:42,684 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/1b72e9fc36924123ae7c409445f91db2, entries=100, sequenceid=571, filesize=9.6 K 2024-11-22T15:23:42,685 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/.tmp/C/aed19008085f421783f71e2c066a0bd6 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/aed19008085f421783f71e2c066a0bd6 2024-11-22T15:23:42,689 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/aed19008085f421783f71e2c066a0bd6, entries=100, sequenceid=571, filesize=9.6 K 2024-11-22T15:23:42,689 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for db373da2a391c371f2b4fcae935e7eac in 1257ms, sequenceid=571, compaction requested=true 2024-11-22T15:23:42,690 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/d7e090bbcee240d9b31394f4c6758583, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/9db61d1cba43444ebdbe7ffee752a1c1, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/12ea23a3a3f9461e90a558c26caf9080, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/51aceac83fa840b68e231d5a7c4402a0, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/b7b9b93f13c64126a9785ff3ccada160, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/b2041c95b2704383b2453f31ac3f1feb, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/1a5f07f32de74bc3b1fc17b1c80b4623, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/97ad3f531c7746b6b1ebf507a7ca3c9b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/165ff4519f004aa2a51f14567d1cbd1c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/92f85a5613db44a18bef6cfa6d9ba96e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/9a0f8e85e0d041b5b177d010d2f2a35d, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/748ed977858647fd93373f6dcc2bed46, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/5b72644bed8e4b9d9a4e44fb1c7cfea7, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/6522ba52c9104e9fb3aa52f04406841c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/fb2a2ed675fd4ab0941fc8449ed3728a, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/83734b1362474e8694141671d4028866, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/803d42be51c141dbb2954bac00793ac0, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/c70f798a71b44c4d92253e8af774ea4a, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/dde44970600d4a768aa654cdbbabdaa6, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/6e822e73384c4315934caf3b557368fc, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/ac624757eb554caf9629c4fdb1231ea8, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/55623d1cfac54b3eb9761e6932907718, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/1b79cffdbf684b5fa2ef1685a19383cc, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/485fb5b2233d4386a3e80b7c5ab86311, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/ba0ba98431c3465bbc0329522ab5bd7a, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/9d37099f26ef42699cd26cb652e90cbf, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/ca652fbe365941b0af64a11e8bb630a9, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/e0f2f6069206498c927883bd72b37254, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/9690962ac14b4078a03277fc1609b4ef, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/79503c4e4df64e6181b1abf1f215be0f, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/c3689eff9395421ba878a42e0bb2c3b5, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/b59633e5feb04739afdaf6bc14868c46, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/ca6ae8e122f547039f348e3ad56b59da, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/75846b749bb54b948fa7b262d5b95eae, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/32a4291981864b029f9ea677050a1708, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/d62b26b8f7654876ac3aba544c48a070, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/286aff11ab5a41ca8f134cec8f2d2d2a] to archive 2024-11-22T15:23:42,691 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-22T15:23:42,693 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/d7e090bbcee240d9b31394f4c6758583 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/d7e090bbcee240d9b31394f4c6758583 2024-11-22T15:23:42,695 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/9db61d1cba43444ebdbe7ffee752a1c1 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/9db61d1cba43444ebdbe7ffee752a1c1 2024-11-22T15:23:42,697 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/12ea23a3a3f9461e90a558c26caf9080 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/12ea23a3a3f9461e90a558c26caf9080 2024-11-22T15:23:42,699 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/51aceac83fa840b68e231d5a7c4402a0 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/51aceac83fa840b68e231d5a7c4402a0 2024-11-22T15:23:42,700 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/b7b9b93f13c64126a9785ff3ccada160 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/b7b9b93f13c64126a9785ff3ccada160 2024-11-22T15:23:42,702 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/b2041c95b2704383b2453f31ac3f1feb to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/b2041c95b2704383b2453f31ac3f1feb 2024-11-22T15:23:42,703 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/1a5f07f32de74bc3b1fc17b1c80b4623 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/1a5f07f32de74bc3b1fc17b1c80b4623 2024-11-22T15:23:42,704 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/97ad3f531c7746b6b1ebf507a7ca3c9b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/97ad3f531c7746b6b1ebf507a7ca3c9b 2024-11-22T15:23:42,705 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/165ff4519f004aa2a51f14567d1cbd1c to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/165ff4519f004aa2a51f14567d1cbd1c 2024-11-22T15:23:42,705 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/92f85a5613db44a18bef6cfa6d9ba96e to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/92f85a5613db44a18bef6cfa6d9ba96e 2024-11-22T15:23:42,706 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/9a0f8e85e0d041b5b177d010d2f2a35d to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/9a0f8e85e0d041b5b177d010d2f2a35d 2024-11-22T15:23:42,707 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/748ed977858647fd93373f6dcc2bed46 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/748ed977858647fd93373f6dcc2bed46 2024-11-22T15:23:42,708 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/5b72644bed8e4b9d9a4e44fb1c7cfea7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/5b72644bed8e4b9d9a4e44fb1c7cfea7 2024-11-22T15:23:42,709 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/6522ba52c9104e9fb3aa52f04406841c to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/6522ba52c9104e9fb3aa52f04406841c 2024-11-22T15:23:42,710 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/fb2a2ed675fd4ab0941fc8449ed3728a to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/fb2a2ed675fd4ab0941fc8449ed3728a 2024-11-22T15:23:42,710 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/83734b1362474e8694141671d4028866 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/83734b1362474e8694141671d4028866 2024-11-22T15:23:42,711 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/803d42be51c141dbb2954bac00793ac0 to 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/803d42be51c141dbb2954bac00793ac0 2024-11-22T15:23:42,712 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/c70f798a71b44c4d92253e8af774ea4a to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/c70f798a71b44c4d92253e8af774ea4a 2024-11-22T15:23:42,713 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/dde44970600d4a768aa654cdbbabdaa6 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/dde44970600d4a768aa654cdbbabdaa6 2024-11-22T15:23:42,713 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/6e822e73384c4315934caf3b557368fc to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/6e822e73384c4315934caf3b557368fc 2024-11-22T15:23:42,714 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/ac624757eb554caf9629c4fdb1231ea8 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/ac624757eb554caf9629c4fdb1231ea8 2024-11-22T15:23:42,715 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/55623d1cfac54b3eb9761e6932907718 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/55623d1cfac54b3eb9761e6932907718 2024-11-22T15:23:42,716 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/1b79cffdbf684b5fa2ef1685a19383cc to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/1b79cffdbf684b5fa2ef1685a19383cc 2024-11-22T15:23:42,716 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/485fb5b2233d4386a3e80b7c5ab86311 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/485fb5b2233d4386a3e80b7c5ab86311 2024-11-22T15:23:42,717 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/ba0ba98431c3465bbc0329522ab5bd7a to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/ba0ba98431c3465bbc0329522ab5bd7a 2024-11-22T15:23:42,718 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/9d37099f26ef42699cd26cb652e90cbf to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/9d37099f26ef42699cd26cb652e90cbf 2024-11-22T15:23:42,718 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/ca652fbe365941b0af64a11e8bb630a9 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/ca652fbe365941b0af64a11e8bb630a9 2024-11-22T15:23:42,719 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/e0f2f6069206498c927883bd72b37254 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/e0f2f6069206498c927883bd72b37254 2024-11-22T15:23:42,720 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/9690962ac14b4078a03277fc1609b4ef to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/9690962ac14b4078a03277fc1609b4ef 2024-11-22T15:23:42,720 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/79503c4e4df64e6181b1abf1f215be0f to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/79503c4e4df64e6181b1abf1f215be0f 2024-11-22T15:23:42,721 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/c3689eff9395421ba878a42e0bb2c3b5 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/c3689eff9395421ba878a42e0bb2c3b5 2024-11-22T15:23:42,722 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/b59633e5feb04739afdaf6bc14868c46 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/b59633e5feb04739afdaf6bc14868c46 2024-11-22T15:23:42,723 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/ca6ae8e122f547039f348e3ad56b59da to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/ca6ae8e122f547039f348e3ad56b59da 2024-11-22T15:23:42,723 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/75846b749bb54b948fa7b262d5b95eae to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/75846b749bb54b948fa7b262d5b95eae 2024-11-22T15:23:42,724 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/32a4291981864b029f9ea677050a1708 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/32a4291981864b029f9ea677050a1708 2024-11-22T15:23:42,725 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/d62b26b8f7654876ac3aba544c48a070 to 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/d62b26b8f7654876ac3aba544c48a070 2024-11-22T15:23:42,726 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/286aff11ab5a41ca8f134cec8f2d2d2a to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/286aff11ab5a41ca8f134cec8f2d2d2a 2024-11-22T15:23:42,727 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/5bdf946b880e4c2e9b97872e4716abb4, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/4bc37beddd944264831d0930f87c787d, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/8a62b9222dd841e5a94ac0db45cc4f1b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/3df2f4feb3a540f284640e17566e3e77, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/ae6a79a0abda48b4879571aa413e3737, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/f1c8366eab854df4ba34bfff12b3356c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/c6e2361ddda8489781fd9a2acc85751e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/f1869d1bea6f47caa0b84c4abb43a0a1, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/d1e80a1d4252476e9b8ece586587807b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/7710d4c63d5c428486b92ce26de7ca5f, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/bb25e3f90cf14bedbf7aa3f15db28bb8, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/0e4701a5f95e4881a4cccc519f2703a1, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/379074cd271d42ab9b77650eca1ef142, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/124fcd6eda764915b5a23d6795e6afe2, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/7e54ab0930d54201818967720bb8ae79, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/29f5540501f5404fbd45a11d16aba0d9, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/d13e6fdefac44addbd3e9c87e17aabf5, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/d338f9e2f4b8458bafee9ed934a5cc05, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/bc18ee80de014ddfa1be627533faa96c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/907bd924ee3741bab234c7dade422657, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/2ec15a0a5f274a53931ddb6e61f73e58, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/7a8b93dc5a8841bc94a31d8f6b884ace, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/9da7d7ede3d545cdaa7daf0664aea685, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/231c8b602e4b4e5aad5e554da3f23808, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/d538d4cda99344fe891481af71762d67, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/cee4e75282c54468a195594b1f652892, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/7392c2e136f14b49baa40a4112755b9a, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/759e9e1f7f1c4b38954bb236230020af, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/45b1ba99d2334cecaa8c2d27ed69637c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/65c1d46ea2ce43eb86131395e5631b84, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/29a15caaf616458ba6b12bfa74ecd0b1, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/c46a2e4146514f38a2618aa123a636e0, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/449fbb103bab477eb496347ad3842a62, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/31a674939fbf4654add72cf5c7e1ac9d, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/1bb3d4ba852047b096b86b3786474c1d, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/51c7736cd0694b2bbae45a7fcf554dc4, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/734917f9d32d428b8b1c8f74c2eb43d5] to archive 2024-11-22T15:23:42,727 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-22T15:23:42,728 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/5bdf946b880e4c2e9b97872e4716abb4 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/5bdf946b880e4c2e9b97872e4716abb4 2024-11-22T15:23:42,729 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/4bc37beddd944264831d0930f87c787d to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/4bc37beddd944264831d0930f87c787d 2024-11-22T15:23:42,730 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/8a62b9222dd841e5a94ac0db45cc4f1b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/8a62b9222dd841e5a94ac0db45cc4f1b 2024-11-22T15:23:42,731 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/3df2f4feb3a540f284640e17566e3e77 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/3df2f4feb3a540f284640e17566e3e77 2024-11-22T15:23:42,731 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/ae6a79a0abda48b4879571aa413e3737 to 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/ae6a79a0abda48b4879571aa413e3737 2024-11-22T15:23:42,732 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/f1c8366eab854df4ba34bfff12b3356c to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/f1c8366eab854df4ba34bfff12b3356c 2024-11-22T15:23:42,733 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/c6e2361ddda8489781fd9a2acc85751e to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/c6e2361ddda8489781fd9a2acc85751e 2024-11-22T15:23:42,733 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/f1869d1bea6f47caa0b84c4abb43a0a1 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/f1869d1bea6f47caa0b84c4abb43a0a1 2024-11-22T15:23:42,734 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/d1e80a1d4252476e9b8ece586587807b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/d1e80a1d4252476e9b8ece586587807b 2024-11-22T15:23:42,735 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/7710d4c63d5c428486b92ce26de7ca5f to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/7710d4c63d5c428486b92ce26de7ca5f 2024-11-22T15:23:42,735 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/bb25e3f90cf14bedbf7aa3f15db28bb8 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/bb25e3f90cf14bedbf7aa3f15db28bb8 2024-11-22T15:23:42,736 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/0e4701a5f95e4881a4cccc519f2703a1 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/0e4701a5f95e4881a4cccc519f2703a1 2024-11-22T15:23:42,737 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/379074cd271d42ab9b77650eca1ef142 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/379074cd271d42ab9b77650eca1ef142 2024-11-22T15:23:42,737 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/124fcd6eda764915b5a23d6795e6afe2 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/124fcd6eda764915b5a23d6795e6afe2 2024-11-22T15:23:42,738 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/7e54ab0930d54201818967720bb8ae79 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/7e54ab0930d54201818967720bb8ae79 2024-11-22T15:23:42,739 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/29f5540501f5404fbd45a11d16aba0d9 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/29f5540501f5404fbd45a11d16aba0d9 2024-11-22T15:23:42,740 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/d13e6fdefac44addbd3e9c87e17aabf5 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/d13e6fdefac44addbd3e9c87e17aabf5 2024-11-22T15:23:42,740 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/d338f9e2f4b8458bafee9ed934a5cc05 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/d338f9e2f4b8458bafee9ed934a5cc05 2024-11-22T15:23:42,741 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/bc18ee80de014ddfa1be627533faa96c to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/bc18ee80de014ddfa1be627533faa96c 2024-11-22T15:23:42,741 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/907bd924ee3741bab234c7dade422657 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/907bd924ee3741bab234c7dade422657 2024-11-22T15:23:42,742 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/2ec15a0a5f274a53931ddb6e61f73e58 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/2ec15a0a5f274a53931ddb6e61f73e58 2024-11-22T15:23:42,743 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/7a8b93dc5a8841bc94a31d8f6b884ace to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/7a8b93dc5a8841bc94a31d8f6b884ace 2024-11-22T15:23:42,744 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/9da7d7ede3d545cdaa7daf0664aea685 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/9da7d7ede3d545cdaa7daf0664aea685 2024-11-22T15:23:42,745 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/231c8b602e4b4e5aad5e554da3f23808 to 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/231c8b602e4b4e5aad5e554da3f23808 2024-11-22T15:23:42,745 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/d538d4cda99344fe891481af71762d67 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/d538d4cda99344fe891481af71762d67 2024-11-22T15:23:42,746 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/cee4e75282c54468a195594b1f652892 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/cee4e75282c54468a195594b1f652892 2024-11-22T15:23:42,747 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/7392c2e136f14b49baa40a4112755b9a to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/7392c2e136f14b49baa40a4112755b9a 2024-11-22T15:23:42,747 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/759e9e1f7f1c4b38954bb236230020af to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/759e9e1f7f1c4b38954bb236230020af 2024-11-22T15:23:42,748 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/45b1ba99d2334cecaa8c2d27ed69637c to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/45b1ba99d2334cecaa8c2d27ed69637c 2024-11-22T15:23:42,748 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/65c1d46ea2ce43eb86131395e5631b84 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/65c1d46ea2ce43eb86131395e5631b84 2024-11-22T15:23:42,749 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/29a15caaf616458ba6b12bfa74ecd0b1 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/29a15caaf616458ba6b12bfa74ecd0b1 2024-11-22T15:23:42,750 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/c46a2e4146514f38a2618aa123a636e0 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/c46a2e4146514f38a2618aa123a636e0 2024-11-22T15:23:42,750 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/449fbb103bab477eb496347ad3842a62 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/449fbb103bab477eb496347ad3842a62 2024-11-22T15:23:42,751 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/31a674939fbf4654add72cf5c7e1ac9d to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/31a674939fbf4654add72cf5c7e1ac9d 2024-11-22T15:23:42,752 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/1bb3d4ba852047b096b86b3786474c1d to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/1bb3d4ba852047b096b86b3786474c1d 2024-11-22T15:23:42,752 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/51c7736cd0694b2bbae45a7fcf554dc4 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/51c7736cd0694b2bbae45a7fcf554dc4 2024-11-22T15:23:42,753 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/734917f9d32d428b8b1c8f74c2eb43d5 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/734917f9d32d428b8b1c8f74c2eb43d5 2024-11-22T15:23:42,755 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/f856d5026f53422baebe11109765cc0a, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/e2fd80c6a29b4e33abfd8e0db5aa6085, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/357ba40cf59b4f8fb5769faba365c32f, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/d7f36ff1744e4fbead358a37bcc3409a, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/b314895745da455182573f930aa026d1, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/35d73175da404be2a0c8484e4712f647, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/cc90788a998d4dd18bd6e7bdb2a45bbb, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/8a94163c11ce448495dd4e1837d99b28, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/a307250c7dbc4baca40524ec2bc1f45b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/44772458fc9e471e98f772eee58c8a40, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/fe8c30f83f034de882ba003915e7d53c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/102b1e5faca34e3888644ac750912cc9, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/4af4a91e2c0448abb3a342617a5e0ea6, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/4dec7fb5eba747408dc1e1da95f786b6, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/b3ea63ea34b44498a9734a1e5c317088, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/f9e24c6436634b04a398ad0c7e3e7c13, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/bb16cf7a24614bc69a1bb674600faa6f, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/56df87490711428fbed196971dbec570, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/39aaf70d16984785874bd5c9eb6e5f8f, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/e825f40dce584b6f98b703548b5eb9bd, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/0de77d2821eb47edb504ad8a6330518b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/40d6145fee874e1d86b48b91e8b1283a, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/f5214916c71548aa87568cb9315f52c6, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/61546be3a9a040bcb9c568325006d48b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/908b025b95704978b738c6a7ef89520b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/b6b1424364364b2ab2312944d7d1924b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/36eaa7768dfb4941b1cf289723e6db18, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/98ff1d60b283469198f03c4106339dee, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/73e9c6cd5d50448e9f0fdf14ef61ea42, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/41cb42d030764e999b54f839d7866fc0, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/3502a9e228a348549e43e516e4095e6c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/deb56ce8f6764c4fb90e54422622c8d5, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/d83dc082887244b09159828c86b716b1, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/10e5ba468a044a17b8ce27dc0cef4fc3, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/2878e838a56148c98a714250c6d78bbc, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/c9b1ae0fdf8f434bab100f99519e8f93, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/a605cdce1ca54d96a66e4f44a89674b3] to archive 2024-11-22T15:23:42,756 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-22T15:23:42,757 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/f856d5026f53422baebe11109765cc0a to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/f856d5026f53422baebe11109765cc0a 2024-11-22T15:23:42,758 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/e2fd80c6a29b4e33abfd8e0db5aa6085 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/e2fd80c6a29b4e33abfd8e0db5aa6085 2024-11-22T15:23:42,759 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/357ba40cf59b4f8fb5769faba365c32f to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/357ba40cf59b4f8fb5769faba365c32f 2024-11-22T15:23:42,760 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/d7f36ff1744e4fbead358a37bcc3409a to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/d7f36ff1744e4fbead358a37bcc3409a 2024-11-22T15:23:42,761 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/b314895745da455182573f930aa026d1 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/b314895745da455182573f930aa026d1 2024-11-22T15:23:42,762 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/35d73175da404be2a0c8484e4712f647 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/35d73175da404be2a0c8484e4712f647 2024-11-22T15:23:42,763 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/cc90788a998d4dd18bd6e7bdb2a45bbb to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/cc90788a998d4dd18bd6e7bdb2a45bbb 2024-11-22T15:23:42,764 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/8a94163c11ce448495dd4e1837d99b28 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/8a94163c11ce448495dd4e1837d99b28 2024-11-22T15:23:42,765 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/a307250c7dbc4baca40524ec2bc1f45b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/a307250c7dbc4baca40524ec2bc1f45b 2024-11-22T15:23:42,766 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/44772458fc9e471e98f772eee58c8a40 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/44772458fc9e471e98f772eee58c8a40 2024-11-22T15:23:42,766 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/fe8c30f83f034de882ba003915e7d53c to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/fe8c30f83f034de882ba003915e7d53c 2024-11-22T15:23:42,767 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/102b1e5faca34e3888644ac750912cc9 to 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/102b1e5faca34e3888644ac750912cc9 2024-11-22T15:23:42,769 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/4af4a91e2c0448abb3a342617a5e0ea6 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/4af4a91e2c0448abb3a342617a5e0ea6 2024-11-22T15:23:42,770 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/4dec7fb5eba747408dc1e1da95f786b6 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/4dec7fb5eba747408dc1e1da95f786b6 2024-11-22T15:23:42,771 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/b3ea63ea34b44498a9734a1e5c317088 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/b3ea63ea34b44498a9734a1e5c317088 2024-11-22T15:23:42,771 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/f9e24c6436634b04a398ad0c7e3e7c13 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/f9e24c6436634b04a398ad0c7e3e7c13 2024-11-22T15:23:42,772 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/bb16cf7a24614bc69a1bb674600faa6f to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/bb16cf7a24614bc69a1bb674600faa6f 2024-11-22T15:23:42,773 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/56df87490711428fbed196971dbec570 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/56df87490711428fbed196971dbec570 2024-11-22T15:23:42,774 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/39aaf70d16984785874bd5c9eb6e5f8f to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/39aaf70d16984785874bd5c9eb6e5f8f 2024-11-22T15:23:42,775 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/e825f40dce584b6f98b703548b5eb9bd to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/e825f40dce584b6f98b703548b5eb9bd 2024-11-22T15:23:42,776 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/0de77d2821eb47edb504ad8a6330518b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/0de77d2821eb47edb504ad8a6330518b 2024-11-22T15:23:42,776 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/40d6145fee874e1d86b48b91e8b1283a to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/40d6145fee874e1d86b48b91e8b1283a 2024-11-22T15:23:42,777 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/f5214916c71548aa87568cb9315f52c6 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/f5214916c71548aa87568cb9315f52c6 2024-11-22T15:23:42,778 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/61546be3a9a040bcb9c568325006d48b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/61546be3a9a040bcb9c568325006d48b 2024-11-22T15:23:42,779 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/908b025b95704978b738c6a7ef89520b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/908b025b95704978b738c6a7ef89520b 2024-11-22T15:23:42,780 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/b6b1424364364b2ab2312944d7d1924b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/b6b1424364364b2ab2312944d7d1924b 2024-11-22T15:23:42,780 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/36eaa7768dfb4941b1cf289723e6db18 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/36eaa7768dfb4941b1cf289723e6db18 2024-11-22T15:23:42,781 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/98ff1d60b283469198f03c4106339dee to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/98ff1d60b283469198f03c4106339dee 2024-11-22T15:23:42,782 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/73e9c6cd5d50448e9f0fdf14ef61ea42 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/73e9c6cd5d50448e9f0fdf14ef61ea42 2024-11-22T15:23:42,783 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/41cb42d030764e999b54f839d7866fc0 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/41cb42d030764e999b54f839d7866fc0 2024-11-22T15:23:42,783 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/3502a9e228a348549e43e516e4095e6c to 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/3502a9e228a348549e43e516e4095e6c 2024-11-22T15:23:42,784 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/deb56ce8f6764c4fb90e54422622c8d5 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/deb56ce8f6764c4fb90e54422622c8d5 2024-11-22T15:23:42,785 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/d83dc082887244b09159828c86b716b1 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/d83dc082887244b09159828c86b716b1 2024-11-22T15:23:42,786 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/10e5ba468a044a17b8ce27dc0cef4fc3 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/10e5ba468a044a17b8ce27dc0cef4fc3 2024-11-22T15:23:42,787 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/2878e838a56148c98a714250c6d78bbc to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/2878e838a56148c98a714250c6d78bbc 2024-11-22T15:23:42,787 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/c9b1ae0fdf8f434bab100f99519e8f93 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/c9b1ae0fdf8f434bab100f99519e8f93 2024-11-22T15:23:42,788 DEBUG [StoreCloser-TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/a605cdce1ca54d96a66e4f44a89674b3 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/a605cdce1ca54d96a66e4f44a89674b3 2024-11-22T15:23:42,792 DEBUG 
[RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/recovered.edits/574.seqid, newMaxSeqId=574, maxSeqId=1 2024-11-22T15:23:42,792 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac. 2024-11-22T15:23:42,793 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1635): Region close journal for db373da2a391c371f2b4fcae935e7eac: 2024-11-22T15:23:42,794 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(170): Closed db373da2a391c371f2b4fcae935e7eac 2024-11-22T15:23:42,794 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=91 updating hbase:meta row=db373da2a391c371f2b4fcae935e7eac, regionState=CLOSED 2024-11-22T15:23:42,796 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=92, resume processing ppid=91 2024-11-22T15:23:42,796 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, ppid=91, state=SUCCESS; CloseRegionProcedure db373da2a391c371f2b4fcae935e7eac, server=77927f992d0b,36033,1732288915809 in 1.5150 sec 2024-11-22T15:23:42,797 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=91, resume processing ppid=90 2024-11-22T15:23:42,797 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, ppid=90, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=db373da2a391c371f2b4fcae935e7eac, UNASSIGN in 1.5180 sec 2024-11-22T15:23:42,798 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-11-22T15:23:42,798 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.5210 sec 2024-11-22T15:23:42,798 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732289022798"}]},"ts":"1732289022798"} 2024-11-22T15:23:42,799 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-22T15:23:42,809 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-22T15:23:42,810 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5560 sec 2024-11-22T15:23:43,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-22T15:23:43,365 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 89 completed 2024-11-22T15:23:43,366 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-22T15:23:43,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=93, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; 
DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T15:23:43,369 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=93, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T15:23:43,370 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=93, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T15:23:43,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-22T15:23:43,375 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac 2024-11-22T15:23:43,381 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A, FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B, FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C, FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/recovered.edits] 2024-11-22T15:23:43,386 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/0ff725e39e71430e8307b6496abdae0c to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/0ff725e39e71430e8307b6496abdae0c 2024-11-22T15:23:43,387 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/4f8190842f924a56944138efd2d8c724 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/4f8190842f924a56944138efd2d8c724 2024-11-22T15:23:43,389 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/654ede07ddd24de288074224dd8df625 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/654ede07ddd24de288074224dd8df625 2024-11-22T15:23:43,391 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/ae81afbd112e4032a86d1f86225e289a to 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/A/ae81afbd112e4032a86d1f86225e289a 2024-11-22T15:23:43,394 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/04035a5224814b03b5c9327478518fc2 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/04035a5224814b03b5c9327478518fc2 2024-11-22T15:23:43,396 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/055bccfb9c4b4c819e5729137efd7ced to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/055bccfb9c4b4c819e5729137efd7ced 2024-11-22T15:23:43,397 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/1b72e9fc36924123ae7c409445f91db2 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/1b72e9fc36924123ae7c409445f91db2 2024-11-22T15:23:43,399 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/d00fdb6e093745538a400a8f35efa704 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/B/d00fdb6e093745538a400a8f35efa704 2024-11-22T15:23:43,401 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/0631318acff64c83b023ae4cda53324e to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/0631318acff64c83b023ae4cda53324e 2024-11-22T15:23:43,403 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/578305f810e74cce9e7956371ef1f5b5 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/578305f810e74cce9e7956371ef1f5b5 2024-11-22T15:23:43,405 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/aed19008085f421783f71e2c066a0bd6 to 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/aed19008085f421783f71e2c066a0bd6 2024-11-22T15:23:43,406 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/d70530754ef640698adeb59cad824584 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/C/d70530754ef640698adeb59cad824584 2024-11-22T15:23:43,410 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/recovered.edits/574.seqid to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac/recovered.edits/574.seqid 2024-11-22T15:23:43,411 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/db373da2a391c371f2b4fcae935e7eac 2024-11-22T15:23:43,411 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-22T15:23:43,414 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=93, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T15:23:43,416 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-22T15:23:43,418 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-22T15:23:43,420 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=93, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T15:23:43,420 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-22T15:23:43,420 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732289023420"}]},"ts":"9223372036854775807"} 2024-11-22T15:23:43,423 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-22T15:23:43,423 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => db373da2a391c371f2b4fcae935e7eac, NAME => 'TestAcidGuarantees,,1732288989979.db373da2a391c371f2b4fcae935e7eac.', STARTKEY => '', ENDKEY => ''}] 2024-11-22T15:23:43,423 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-11-22T15:23:43,423 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732289023423"}]},"ts":"9223372036854775807"} 2024-11-22T15:23:43,425 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-22T15:23:43,469 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=93, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T15:23:43,472 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 103 msec 2024-11-22T15:23:43,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-22T15:23:43,473 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 93 completed 2024-11-22T15:23:43,486 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=237 (was 237), OpenFileDescriptor=445 (was 449), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=660 (was 666), ProcessCount=11 (was 11), AvailableMemoryMB=4155 (was 4262) 2024-11-22T15:23:43,495 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=237, OpenFileDescriptor=445, MaxFileDescriptor=1048576, SystemLoadAverage=660, ProcessCount=11, AvailableMemoryMB=4155 2024-11-22T15:23:43,497 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
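The entries above complete the DISABLE/DELETE cycle for default:TestAcidGuarantees (procIds 89 and 93): HFileArchiver moves every store file under the archive/data/default/ tree, the region row and the table state row are removed from hbase:meta, and the table descriptor is dropped. The TableDescriptorChecker warning that closes this block refers to the next table the test is about to create; 131072 bytes is 128 KB, well below the usual flush size, presumably set deliberately so the atomicity run flushes often. A minimal client-side sketch of the same disable/delete cycle through the public Admin API (connection setup here is illustrative, not taken from the test harness):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class DropTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();  // reads hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("TestAcidGuarantees");
          if (admin.tableExists(tn)) {
            admin.disableTable(tn);  // DisableTableProcedure: regions are closed, table marked DISABLED
            admin.deleteTable(tn);   // DeleteTableProcedure: store files archived, hbase:meta cleaned up
          }
        }
      }
    }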
2024-11-22T15:23:43,497 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T15:23:43,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=94, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-22T15:23:43,499 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T15:23:43,499 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:43,499 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 94 2024-11-22T15:23:43,500 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T15:23:43,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-11-22T15:23:43,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742217_1393 (size=963) 2024-11-22T15:23:43,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-11-22T15:23:43,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-11-22T15:23:43,909 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690 2024-11-22T15:23:43,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742218_1394 (size=53) 2024-11-22T15:23:44,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-11-22T15:23:44,320 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T15:23:44,320 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 661abb5eb8be4eaf4f236a86a23909c7, disabling compactions & flushes 2024-11-22T15:23:44,320 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:44,320 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:44,320 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. after waiting 0 ms 2024-11-22T15:23:44,320 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:44,320 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
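The RegionOpenAndInit entry above echoes the full descriptor requested for pid=94: a table-level attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' plus three identical column families A, B and C with VERSIONS => '1' and a DEFAULT store file tracker. A hedged sketch of how an equivalent descriptor could be assembled with the client API (the Admin handle is assumed to come from a connection such as the one in the previous sketch):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class CreateAdaptiveTable {
      // Rebuilds a descriptor equivalent to the one echoed for pid=94: table-level ADAPTIVE
      // in-memory compaction, a deliberately small memstore flush size, and families A, B, C.
      static void create(Admin admin) throws java.io.IOException {
        TableDescriptorBuilder tdb =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE")
                .setMemStoreFlushSize(131072L);  // 128 KB, the value the checker warns about
        for (String cf : new String[] {"A", "B", "C"}) {
          tdb.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(cf))
              .setMaxVersions(1)                 // VERSIONS => '1' in the logged descriptor
              .build());
        }
        TableDescriptor td = tdb.build();
        admin.createTable(td);                   // drives CreateTableProcedure on the master
      }
    }

Everything not set explicitly here falls back to the defaults the log prints for each family (BLOOMFILTER ROW, 64 KB block size, no compression, and so on).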
2024-11-22T15:23:44,320 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:23:44,322 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T15:23:44,323 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732289024322"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732289024322"}]},"ts":"1732289024322"} 2024-11-22T15:23:44,325 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-22T15:23:44,326 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T15:23:44,326 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732289024326"}]},"ts":"1732289024326"} 2024-11-22T15:23:44,328 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-22T15:23:44,351 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=661abb5eb8be4eaf4f236a86a23909c7, ASSIGN}] 2024-11-22T15:23:44,352 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=661abb5eb8be4eaf4f236a86a23909c7, ASSIGN 2024-11-22T15:23:44,353 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=661abb5eb8be4eaf4f236a86a23909c7, ASSIGN; state=OFFLINE, location=77927f992d0b,36033,1732288915809; forceNewPlan=false, retain=false 2024-11-22T15:23:44,504 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=661abb5eb8be4eaf4f236a86a23909c7, regionState=OPENING, regionLocation=77927f992d0b,36033,1732288915809 2024-11-22T15:23:44,506 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=96, ppid=95, state=RUNNABLE; OpenRegionProcedure 661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809}] 2024-11-22T15:23:44,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-11-22T15:23:44,659 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:44,664 INFO [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
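Between submission and completion of pid=94 the master logs a steady stream of "Checking to see if procedure is done pid=94" calls; these come from the client-side HBaseAdmin TableFuture polling until the CreateTableProcedure finishes (its "Operation: CREATE ... procId: 94 completed" line appears further below). A small sketch of that blocking pattern, with an availability check as an extra guard; the helper name is illustrative:

    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    public final class CreateAndWait {
      // createTable() blocks behind a client-side TableFuture that polls the master; the repeated
      // "Checking to see if procedure is done pid=94" entries are the server side of that polling.
      static void createAndWait(Admin admin, TableDescriptor td) throws java.io.IOException {
        admin.createTable(td);                              // returns once the procedure finishes
        if (!admin.isTableAvailable(td.getTableName())) {   // all regions assigned and open
          throw new java.io.IOException("table did not come online");
        }
      }
    }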
2024-11-22T15:23:44,664 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(7285): Opening region: {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} 2024-11-22T15:23:44,664 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:44,664 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T15:23:44,665 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(7327): checking encryption for 661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:44,665 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(7330): checking classloading for 661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:44,667 INFO [StoreOpener-661abb5eb8be4eaf4f236a86a23909c7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:44,670 INFO [StoreOpener-661abb5eb8be4eaf4f236a86a23909c7-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T15:23:44,670 INFO [StoreOpener-661abb5eb8be4eaf4f236a86a23909c7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 661abb5eb8be4eaf4f236a86a23909c7 columnFamilyName A 2024-11-22T15:23:44,671 DEBUG [StoreOpener-661abb5eb8be4eaf4f236a86a23909c7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:44,672 INFO [StoreOpener-661abb5eb8be4eaf4f236a86a23909c7-1 {}] regionserver.HStore(327): Store=661abb5eb8be4eaf4f236a86a23909c7/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T15:23:44,672 INFO [StoreOpener-661abb5eb8be4eaf4f236a86a23909c7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:44,673 INFO [StoreOpener-661abb5eb8be4eaf4f236a86a23909c7-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T15:23:44,674 INFO [StoreOpener-661abb5eb8be4eaf4f236a86a23909c7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 661abb5eb8be4eaf4f236a86a23909c7 columnFamilyName B 2024-11-22T15:23:44,674 DEBUG [StoreOpener-661abb5eb8be4eaf4f236a86a23909c7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:44,675 INFO [StoreOpener-661abb5eb8be4eaf4f236a86a23909c7-1 {}] regionserver.HStore(327): Store=661abb5eb8be4eaf4f236a86a23909c7/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T15:23:44,675 INFO [StoreOpener-661abb5eb8be4eaf4f236a86a23909c7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:44,676 INFO [StoreOpener-661abb5eb8be4eaf4f236a86a23909c7-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T15:23:44,676 INFO [StoreOpener-661abb5eb8be4eaf4f236a86a23909c7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 661abb5eb8be4eaf4f236a86a23909c7 columnFamilyName C 2024-11-22T15:23:44,676 DEBUG [StoreOpener-661abb5eb8be4eaf4f236a86a23909c7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:44,677 INFO [StoreOpener-661abb5eb8be4eaf4f236a86a23909c7-1 {}] regionserver.HStore(327): Store=661abb5eb8be4eaf4f236a86a23909c7/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T15:23:44,677 INFO [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:44,678 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:44,679 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:44,681 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T15:23:44,683 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(1085): writing seq id for 661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:44,685 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T15:23:44,686 INFO [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(1102): Opened 661abb5eb8be4eaf4f236a86a23909c7; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68270442, jitterRate=0.017308861017227173}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T15:23:44,687 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(1001): Region open journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:23:44,687 INFO [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7., pid=96, masterSystemTime=1732289024659 2024-11-22T15:23:44,689 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:44,689 INFO [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
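Each of the three stores opened above is backed by a CompactingMemStore with an in-memory flush threshold of 2.00 MB, a pipeline threshold of 2 and compactor=ADAPTIVE, which is what the table-level 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' attribute requests. Roughly speaking, the ADAPTIVE policy lets each store choose between index-merge-only and duplicate-eliminating in-memory compaction based on the duplication it observes. The same policy can also be pinned per column family rather than table-wide; a hedged sketch using the public enum (the family name is illustrative):

    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class AdaptiveFamily {
      // Requests ADAPTIVE in-memory compaction for one family instead of table-wide metadata.
      static ColumnFamilyDescriptor adaptiveFamily(String name) {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
            .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)  // NONE, BASIC, EAGER or ADAPTIVE
            .setMaxVersions(1)
            .build();
      }
    }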
2024-11-22T15:23:44,689 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=661abb5eb8be4eaf4f236a86a23909c7, regionState=OPEN, openSeqNum=2, regionLocation=77927f992d0b,36033,1732288915809 2024-11-22T15:23:44,692 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=96, resume processing ppid=95 2024-11-22T15:23:44,692 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, ppid=95, state=SUCCESS; OpenRegionProcedure 661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 in 184 msec 2024-11-22T15:23:44,693 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=95, resume processing ppid=94 2024-11-22T15:23:44,693 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, ppid=94, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=661abb5eb8be4eaf4f236a86a23909c7, ASSIGN in 341 msec 2024-11-22T15:23:44,694 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T15:23:44,694 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732289024694"}]},"ts":"1732289024694"} 2024-11-22T15:23:44,695 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-22T15:23:44,744 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T15:23:44,748 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2470 sec 2024-11-22T15:23:45,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-11-22T15:23:45,610 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 94 completed 2024-11-22T15:23:45,611 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2f7f772a to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2b976e1a 2024-11-22T15:23:45,652 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1df61dc9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:23:45,653 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:23:45,654 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47482, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:23:45,655 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-22T15:23:45,656 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39600, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-22T15:23:45,657 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-22T15:23:45,658 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T15:23:45,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=97, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-22T15:23:45,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742219_1395 (size=999) 2024-11-22T15:23:46,068 DEBUG [PEWorker-4 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-11-22T15:23:46,068 INFO [PEWorker-4 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-11-22T15:23:46,070 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=98, ppid=97, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-22T15:23:46,071 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=661abb5eb8be4eaf4f236a86a23909c7, REOPEN/MOVE}] 2024-11-22T15:23:46,072 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=661abb5eb8be4eaf4f236a86a23909c7, REOPEN/MOVE 2024-11-22T15:23:46,072 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=661abb5eb8be4eaf4f236a86a23909c7, regionState=CLOSING, regionLocation=77927f992d0b,36033,1732288915809 2024-11-22T15:23:46,073 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-22T15:23:46,073 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=100, ppid=99, state=RUNNABLE; CloseRegionProcedure 661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809}] 2024-11-22T15:23:46,224 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:46,224 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] handler.UnassignRegionHandler(124): Close 661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:46,224 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-22T15:23:46,225 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1681): Closing 661abb5eb8be4eaf4f236a86a23909c7, disabling compactions & flushes 2024-11-22T15:23:46,225 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:46,225 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:46,225 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. after waiting 0 ms 2024-11-22T15:23:46,225 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
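The modify request above changes only family A, adding IS_MOB => 'true' with MOB_THRESHOLD => '4', and the master then reopens the region through a ReopenTableRegionsProcedure (pid 97 and its children) so the new descriptor takes effect; the close under way here is the first half of that reopen. A hedged sketch of the equivalent client-side modification, assuming the same Admin handle as in the earlier sketches:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class EnableMobOnA {
      // Turns family 'A' into a MOB family: values larger than 4 bytes are stored as MOB files.
      static void enableMob(Admin admin) throws java.io.IOException {
        TableName tn = TableName.valueOf("TestAcidGuarantees");
        TableDescriptor current = admin.getDescriptor(tn);
        TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
            .modifyColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
                .setMobEnabled(true)   // IS_MOB => 'true'
                .setMobThreshold(4L)   // MOB_THRESHOLD => '4' (bytes)
                .build())
            .build();
        admin.modifyTable(modified);   // master updates the descriptor and reopens the region
      }
    }

With a 4-byte threshold virtually every value written to A is stored as a MOB cell, which is presumably the point of the testMobScanAtomicity setup.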
2024-11-22T15:23:46,252 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-22T15:23:46,253 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:46,253 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1635): Region close journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:23:46,253 WARN [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegionServer(3786): Not adding moved region record: 661abb5eb8be4eaf4f236a86a23909c7 to self. 2024-11-22T15:23:46,254 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] handler.UnassignRegionHandler(170): Closed 661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:46,254 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=661abb5eb8be4eaf4f236a86a23909c7, regionState=CLOSED 2024-11-22T15:23:46,256 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=100, resume processing ppid=99 2024-11-22T15:23:46,256 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, ppid=99, state=SUCCESS; CloseRegionProcedure 661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 in 182 msec 2024-11-22T15:23:46,256 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=661abb5eb8be4eaf4f236a86a23909c7, REOPEN/MOVE; state=CLOSED, location=77927f992d0b,36033,1732288915809; forceNewPlan=false, retain=true 2024-11-22T15:23:46,407 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=661abb5eb8be4eaf4f236a86a23909c7, regionState=OPENING, regionLocation=77927f992d0b,36033,1732288915809 2024-11-22T15:23:46,408 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=101, ppid=99, state=RUNNABLE; OpenRegionProcedure 661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809}] 2024-11-22T15:23:46,559 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:46,561 INFO [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
2024-11-22T15:23:46,561 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(7285): Opening region: {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} 2024-11-22T15:23:46,562 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:46,562 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T15:23:46,562 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(7327): checking encryption for 661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:46,562 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(7330): checking classloading for 661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:46,563 INFO [StoreOpener-661abb5eb8be4eaf4f236a86a23909c7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:46,564 INFO [StoreOpener-661abb5eb8be4eaf4f236a86a23909c7-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T15:23:46,564 INFO [StoreOpener-661abb5eb8be4eaf4f236a86a23909c7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 661abb5eb8be4eaf4f236a86a23909c7 columnFamilyName A 2024-11-22T15:23:46,565 DEBUG [StoreOpener-661abb5eb8be4eaf4f236a86a23909c7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:46,566 INFO [StoreOpener-661abb5eb8be4eaf4f236a86a23909c7-1 {}] regionserver.HStore(327): Store=661abb5eb8be4eaf4f236a86a23909c7/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T15:23:46,566 INFO [StoreOpener-661abb5eb8be4eaf4f236a86a23909c7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:46,567 INFO [StoreOpener-661abb5eb8be4eaf4f236a86a23909c7-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T15:23:46,567 INFO [StoreOpener-661abb5eb8be4eaf4f236a86a23909c7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 661abb5eb8be4eaf4f236a86a23909c7 columnFamilyName B 2024-11-22T15:23:46,567 DEBUG [StoreOpener-661abb5eb8be4eaf4f236a86a23909c7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:46,567 INFO [StoreOpener-661abb5eb8be4eaf4f236a86a23909c7-1 {}] regionserver.HStore(327): Store=661abb5eb8be4eaf4f236a86a23909c7/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T15:23:46,568 INFO [StoreOpener-661abb5eb8be4eaf4f236a86a23909c7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:46,568 INFO [StoreOpener-661abb5eb8be4eaf4f236a86a23909c7-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T15:23:46,568 INFO [StoreOpener-661abb5eb8be4eaf4f236a86a23909c7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 661abb5eb8be4eaf4f236a86a23909c7 columnFamilyName C 2024-11-22T15:23:46,568 DEBUG [StoreOpener-661abb5eb8be4eaf4f236a86a23909c7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:46,569 INFO [StoreOpener-661abb5eb8be4eaf4f236a86a23909c7-1 {}] regionserver.HStore(327): Store=661abb5eb8be4eaf4f236a86a23909c7/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T15:23:46,569 INFO [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:46,569 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:46,570 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:46,571 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T15:23:46,572 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(1085): writing seq id for 661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:46,574 INFO [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(1102): Opened 661abb5eb8be4eaf4f236a86a23909c7; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66761650, jitterRate=-0.00517389178276062}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T15:23:46,575 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(1001): Region open journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:23:46,575 INFO [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7., pid=101, masterSystemTime=1732289026559 2024-11-22T15:23:46,576 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:46,576 INFO [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
2024-11-22T15:23:46,577 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=661abb5eb8be4eaf4f236a86a23909c7, regionState=OPEN, openSeqNum=5, regionLocation=77927f992d0b,36033,1732288915809 2024-11-22T15:23:46,578 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=101, resume processing ppid=99 2024-11-22T15:23:46,578 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, ppid=99, state=SUCCESS; OpenRegionProcedure 661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 in 169 msec 2024-11-22T15:23:46,579 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=99, resume processing ppid=98 2024-11-22T15:23:46,579 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, ppid=98, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=661abb5eb8be4eaf4f236a86a23909c7, REOPEN/MOVE in 507 msec 2024-11-22T15:23:46,580 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=98, resume processing ppid=97 2024-11-22T15:23:46,580 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, ppid=97, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 509 msec 2024-11-22T15:23:46,581 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 923 msec 2024-11-22T15:23:46,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-11-22T15:23:46,583 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0bf5e2f0 to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1b82ba2a 2024-11-22T15:23:46,612 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3637e4c6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:23:46,612 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x75b14fbd to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7b6cf8cb 2024-11-22T15:23:46,627 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72f422b4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:23:46,628 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x62f74604 to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7ec15031 2024-11-22T15:23:46,635 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2df33cdf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:23:46,636 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x49e13594 to 
127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3dd5b441 2024-11-22T15:23:46,649 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9f472e0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:23:46,650 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2c54a0d3 to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3c336ea4 2024-11-22T15:23:46,660 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@167a78b0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:23:46,661 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1f94d721 to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5aee939b 2024-11-22T15:23:46,668 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e247aa1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:23:46,669 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x319559be to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1f49665c 2024-11-22T15:23:46,683 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2205f666, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:23:46,684 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c907e21 to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@683f8469 2024-11-22T15:23:46,693 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6584e9ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:23:46,694 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x61ec0f48 to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@75e4d3d0 2024-11-22T15:23:46,701 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37ec8e3b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:23:46,702 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x7819b9e2 to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2b308f62 2024-11-22T15:23:46,718 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@787e5169, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:23:46,720 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:23:46,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=102, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees 2024-11-22T15:23:46,722 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=102, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:23:46,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-22T15:23:46,722 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=102, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:23:46,722 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=102, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:23:46,727 DEBUG [hconnection-0x7f6dc6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:23:46,727 DEBUG [hconnection-0xa968a3b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:23:46,728 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47494, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:23:46,728 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47492, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:23:46,731 DEBUG [hconnection-0x5b54ecf1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:23:46,731 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47520, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:23:46,735 DEBUG [hconnection-0x75f8e629-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:23:46,736 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47522, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:23:46,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): 
Flush requested on 661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:46,736 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 661abb5eb8be4eaf4f236a86a23909c7 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-22T15:23:46,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=A 2024-11-22T15:23:46,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:46,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=B 2024-11-22T15:23:46,737 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:46,737 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=C 2024-11-22T15:23:46,737 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:46,749 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:46,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289086749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:46,749 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:46,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289086749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:46,751 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:46,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1732289086750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:46,758 DEBUG [hconnection-0x5e0147f0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:23:46,758 DEBUG [hconnection-0x4148972a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:23:46,759 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47524, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:23:46,759 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47526, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:23:46,763 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:46,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289086763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:46,766 DEBUG [hconnection-0x2085f1d3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:23:46,767 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47532, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:23:46,768 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:46,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289086768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:46,775 DEBUG [hconnection-0x586a6932-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:23:46,776 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47534, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:23:46,783 DEBUG [hconnection-0x792de1bb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:23:46,784 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47538, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:23:46,788 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122f96000c8899a4160b0cfeafe99c1596f_661abb5eb8be4eaf4f236a86a23909c7 is 50, key is test_row_0/A:col10/1732289026735/Put/seqid=0 2024-11-22T15:23:46,793 DEBUG [hconnection-0x4198bd8c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:23:46,794 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47546, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:23:46,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-22T15:23:46,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742220_1396 (size=12154) 2024-11-22T15:23:46,826 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:46,830 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122f96000c8899a4160b0cfeafe99c1596f_661abb5eb8be4eaf4f236a86a23909c7 to 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122f96000c8899a4160b0cfeafe99c1596f_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:46,830 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/019d2d1fab3c450d98a3c1907be8eee9, store: [table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:23:46,831 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/019d2d1fab3c450d98a3c1907be8eee9 is 175, key is test_row_0/A:col10/1732289026735/Put/seqid=0 2024-11-22T15:23:46,851 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:46,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289086851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:46,851 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:46,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1732289086851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:46,853 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:46,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289086851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:46,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742221_1397 (size=30955) 2024-11-22T15:23:46,867 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/019d2d1fab3c450d98a3c1907be8eee9 2024-11-22T15:23:46,869 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:46,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289086868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:46,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:46,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289086870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:46,874 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:46,874 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-22T15:23:46,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
2024-11-22T15:23:46,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. as already flushing 2024-11-22T15:23:46,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:46,874 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:46,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:23:46,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:23:46,904 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/0af71acc0fae436facb5fcb58ec84664 is 50, key is test_row_0/B:col10/1732289026735/Put/seqid=0 2024-11-22T15:23:46,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742222_1398 (size=12001) 2024-11-22T15:23:46,920 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/0af71acc0fae436facb5fcb58ec84664 2024-11-22T15:23:46,962 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/f1105958470f4c1c88b6a2f735816768 is 50, key is test_row_0/C:col10/1732289026735/Put/seqid=0 2024-11-22T15:23:46,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742223_1399 (size=12001) 2024-11-22T15:23:46,983 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/f1105958470f4c1c88b6a2f735816768 2024-11-22T15:23:46,988 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/019d2d1fab3c450d98a3c1907be8eee9 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/019d2d1fab3c450d98a3c1907be8eee9 2024-11-22T15:23:46,990 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/019d2d1fab3c450d98a3c1907be8eee9, entries=150, sequenceid=16, filesize=30.2 K 2024-11-22T15:23:46,991 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/0af71acc0fae436facb5fcb58ec84664 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/0af71acc0fae436facb5fcb58ec84664 2024-11-22T15:23:46,993 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/0af71acc0fae436facb5fcb58ec84664, entries=150, sequenceid=16, filesize=11.7 K 2024-11-22T15:23:46,994 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/f1105958470f4c1c88b6a2f735816768 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/f1105958470f4c1c88b6a2f735816768 2024-11-22T15:23:46,998 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/f1105958470f4c1c88b6a2f735816768, entries=150, sequenceid=16, filesize=11.7 K 2024-11-22T15:23:46,999 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 661abb5eb8be4eaf4f236a86a23909c7 in 263ms, sequenceid=16, compaction requested=false 2024-11-22T15:23:46,999 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-22T15:23:47,000 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:23:47,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-22T15:23:47,026 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:47,027 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-22T15:23:47,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
2024-11-22T15:23:47,027 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2837): Flushing 661abb5eb8be4eaf4f236a86a23909c7 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-22T15:23:47,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=A 2024-11-22T15:23:47,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:47,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=B 2024-11-22T15:23:47,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:47,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=C 2024-11-22T15:23:47,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:47,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on 661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:47,054 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. as already flushing 2024-11-22T15:23:47,058 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:47,058 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:47,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289087057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:47,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289087058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:47,058 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:47,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1732289087058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:47,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122327b765993ca42aeacee5c53d8dd98c9_661abb5eb8be4eaf4f236a86a23909c7 is 50, key is test_row_0/A:col10/1732289026747/Put/seqid=0 2024-11-22T15:23:47,071 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:47,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289087070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:47,073 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:47,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289087073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:47,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742224_1400 (size=12154) 2024-11-22T15:23:47,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:47,101 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122327b765993ca42aeacee5c53d8dd98c9_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122327b765993ca42aeacee5c53d8dd98c9_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:47,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/0ceaf3db02a0403096af4aa7c73f934d, store: [table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:23:47,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/0ceaf3db02a0403096af4aa7c73f934d is 175, key is test_row_0/A:col10/1732289026747/Put/seqid=0 2024-11-22T15:23:47,159 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:47,159 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:47,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289087159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:47,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1732289087159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:47,160 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:47,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289087159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:47,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742225_1401 (size=30955) 2024-11-22T15:23:47,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-22T15:23:47,366 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:47,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289087361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:47,367 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:47,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289087361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:47,369 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:47,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1732289087361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:47,378 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:47,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289087374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:47,383 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:47,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289087376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:47,569 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/0ceaf3db02a0403096af4aa7c73f934d 2024-11-22T15:23:47,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/2c8ac830bc2749cea9acb129f79e3137 is 50, key is test_row_0/B:col10/1732289026747/Put/seqid=0 2024-11-22T15:23:47,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742226_1402 (size=12001) 2024-11-22T15:23:47,674 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:47,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289087670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:47,674 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:47,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1732289087671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:47,675 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:47,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289087671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:47,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-22T15:23:47,891 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:47,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289087883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:47,897 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:47,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289087889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:48,037 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/2c8ac830bc2749cea9acb129f79e3137 2024-11-22T15:23:48,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/c0dc48514bc14bafa2ad90518fc61104 is 50, key is test_row_0/C:col10/1732289026747/Put/seqid=0 2024-11-22T15:23:48,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742227_1403 (size=12001) 2024-11-22T15:23:48,185 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:48,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289088178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:48,185 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:48,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289088178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:48,185 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:48,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1732289088179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:48,394 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-22T15:23:48,479 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/c0dc48514bc14bafa2ad90518fc61104 2024-11-22T15:23:48,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/0ceaf3db02a0403096af4aa7c73f934d as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/0ceaf3db02a0403096af4aa7c73f934d 2024-11-22T15:23:48,493 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/0ceaf3db02a0403096af4aa7c73f934d, entries=150, sequenceid=41, filesize=30.2 K 2024-11-22T15:23:48,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/2c8ac830bc2749cea9acb129f79e3137 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/2c8ac830bc2749cea9acb129f79e3137 2024-11-22T15:23:48,502 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, 
pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/2c8ac830bc2749cea9acb129f79e3137, entries=150, sequenceid=41, filesize=11.7 K 2024-11-22T15:23:48,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/c0dc48514bc14bafa2ad90518fc61104 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/c0dc48514bc14bafa2ad90518fc61104 2024-11-22T15:23:48,511 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/c0dc48514bc14bafa2ad90518fc61104, entries=150, sequenceid=41, filesize=11.7 K 2024-11-22T15:23:48,515 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=67.09 KB/68700 for 661abb5eb8be4eaf4f236a86a23909c7 in 1488ms, sequenceid=41, compaction requested=false 2024-11-22T15:23:48,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2538): Flush status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:23:48,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
2024-11-22T15:23:48,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103 2024-11-22T15:23:48,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=103 2024-11-22T15:23:48,517 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=102 2024-11-22T15:23:48,517 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=102, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7940 sec 2024-11-22T15:23:48,522 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees in 1.7980 sec 2024-11-22T15:23:48,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-22T15:23:48,828 INFO [Thread-1775 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 102 completed 2024-11-22T15:23:48,829 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:23:48,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees 2024-11-22T15:23:48,830 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:23:48,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-22T15:23:48,831 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:23:48,831 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:23:48,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on 661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:48,898 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 661abb5eb8be4eaf4f236a86a23909c7 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-22T15:23:48,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=A 2024-11-22T15:23:48,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:48,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=B 2024-11-22T15:23:48,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new 
segment=null 2024-11-22T15:23:48,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=C 2024-11-22T15:23:48,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:48,923 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122870c464f34e940108e263a1085156c7d_661abb5eb8be4eaf4f236a86a23909c7 is 50, key is test_row_0/A:col10/1732289028897/Put/seqid=0 2024-11-22T15:23:48,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-22T15:23:48,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742228_1404 (size=12154) 2024-11-22T15:23:48,947 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:48,952 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122870c464f34e940108e263a1085156c7d_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122870c464f34e940108e263a1085156c7d_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:48,953 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/5a2c7f914ace43ac9b87db9df174e69b, store: [table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:23:48,954 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/5a2c7f914ace43ac9b87db9df174e69b is 175, key is test_row_0/A:col10/1732289028897/Put/seqid=0 2024-11-22T15:23:48,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742229_1405 (size=30955) 2024-11-22T15:23:48,964 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=55, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/5a2c7f914ace43ac9b87db9df174e69b 2024-11-22T15:23:48,982 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/934eb4e6fb884f38a9f9b2af29788691 is 50, key is test_row_0/B:col10/1732289028897/Put/seqid=0 2024-11-22T15:23:48,983 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection 
to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:48,986 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-22T15:23:48,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:48,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. as already flushing 2024-11-22T15:23:48,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:48,986 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:48,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:23:48,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:48,997 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:48,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289088988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:48,998 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:48,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289088991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:49,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742230_1406 (size=12001) 2024-11-22T15:23:49,013 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/934eb4e6fb884f38a9f9b2af29788691 2024-11-22T15:23:49,027 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/0edb27edb6144d858f6ab436fdf2035e is 50, key is test_row_0/C:col10/1732289028897/Put/seqid=0 2024-11-22T15:23:49,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742231_1407 (size=12001) 2024-11-22T15:23:49,059 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/0edb27edb6144d858f6ab436fdf2035e 2024-11-22T15:23:49,070 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/5a2c7f914ace43ac9b87db9df174e69b as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/5a2c7f914ace43ac9b87db9df174e69b 2024-11-22T15:23:49,079 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/5a2c7f914ace43ac9b87db9df174e69b, entries=150, sequenceid=55, filesize=30.2 K 2024-11-22T15:23:49,084 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/934eb4e6fb884f38a9f9b2af29788691 as 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/934eb4e6fb884f38a9f9b2af29788691 2024-11-22T15:23:49,095 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/934eb4e6fb884f38a9f9b2af29788691, entries=150, sequenceid=55, filesize=11.7 K 2024-11-22T15:23:49,098 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/0edb27edb6144d858f6ab436fdf2035e as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/0edb27edb6144d858f6ab436fdf2035e 2024-11-22T15:23:49,103 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:49,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289089098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:49,104 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:49,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289089098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:49,105 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/0edb27edb6144d858f6ab436fdf2035e, entries=150, sequenceid=55, filesize=11.7 K 2024-11-22T15:23:49,106 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 661abb5eb8be4eaf4f236a86a23909c7 in 208ms, sequenceid=55, compaction requested=true 2024-11-22T15:23:49,106 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:23:49,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 661abb5eb8be4eaf4f236a86a23909c7:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:23:49,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:49,106 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:23:49,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 661abb5eb8be4eaf4f236a86a23909c7:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:23:49,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:49,106 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:23:49,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 661abb5eb8be4eaf4f236a86a23909c7:C, priority=-2147483648, current under compaction store 
size is 3 2024-11-22T15:23:49,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:23:49,107 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92865 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:23:49,107 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): 661abb5eb8be4eaf4f236a86a23909c7/A is initiating minor compaction (all files) 2024-11-22T15:23:49,107 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 661abb5eb8be4eaf4f236a86a23909c7/A in TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:49,107 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/019d2d1fab3c450d98a3c1907be8eee9, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/0ceaf3db02a0403096af4aa7c73f934d, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/5a2c7f914ace43ac9b87db9df174e69b] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp, totalSize=90.7 K 2024-11-22T15:23:49,107 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:49,108 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
files: [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/019d2d1fab3c450d98a3c1907be8eee9, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/0ceaf3db02a0403096af4aa7c73f934d, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/5a2c7f914ace43ac9b87db9df174e69b] 2024-11-22T15:23:49,108 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 019d2d1fab3c450d98a3c1907be8eee9, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732289026734 2024-11-22T15:23:49,108 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0ceaf3db02a0403096af4aa7c73f934d, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732289026744 2024-11-22T15:23:49,108 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:23:49,108 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): 661abb5eb8be4eaf4f236a86a23909c7/B is initiating minor compaction (all files) 2024-11-22T15:23:49,109 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 661abb5eb8be4eaf4f236a86a23909c7/B in TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:49,109 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/0af71acc0fae436facb5fcb58ec84664, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/2c8ac830bc2749cea9acb129f79e3137, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/934eb4e6fb884f38a9f9b2af29788691] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp, totalSize=35.2 K 2024-11-22T15:23:49,109 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5a2c7f914ace43ac9b87db9df174e69b, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732289027056 2024-11-22T15:23:49,109 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 0af71acc0fae436facb5fcb58ec84664, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732289026734 2024-11-22T15:23:49,110 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 2c8ac830bc2749cea9acb129f79e3137, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732289026744 2024-11-22T15:23:49,110 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 
934eb4e6fb884f38a9f9b2af29788691, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732289027056 2024-11-22T15:23:49,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-22T15:23:49,138 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 661abb5eb8be4eaf4f236a86a23909c7#B#compaction#356 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:49,139 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/8cae1fea5a764ee29526a007f89095f5 is 50, key is test_row_0/B:col10/1732289028897/Put/seqid=0 2024-11-22T15:23:49,142 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:49,142 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-22T15:23:49,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:49,142 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2837): Flushing 661abb5eb8be4eaf4f236a86a23909c7 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-22T15:23:49,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=A 2024-11-22T15:23:49,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:49,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=B 2024-11-22T15:23:49,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:49,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=C 2024-11-22T15:23:49,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:49,149 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:23:49,171 DEBUG 
[RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241122e17a4614b24a4aec9aca1d55884c64d9_661abb5eb8be4eaf4f236a86a23909c7 store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:23:49,173 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241122e17a4614b24a4aec9aca1d55884c64d9_661abb5eb8be4eaf4f236a86a23909c7, store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:23:49,173 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122e17a4614b24a4aec9aca1d55884c64d9_661abb5eb8be4eaf4f236a86a23909c7 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:23:49,189 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. as already flushing 2024-11-22T15:23:49,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on 661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:49,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411228214ff21ee8d4de88ed65be4f835777b_661abb5eb8be4eaf4f236a86a23909c7 is 50, key is test_row_0/A:col10/1732289028987/Put/seqid=0 2024-11-22T15:23:49,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742232_1408 (size=12104) 2024-11-22T15:23:49,209 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/8cae1fea5a764ee29526a007f89095f5 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/8cae1fea5a764ee29526a007f89095f5 2024-11-22T15:23:49,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742233_1409 (size=4469) 2024-11-22T15:23:49,215 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 661abb5eb8be4eaf4f236a86a23909c7#A#compaction#357 average throughput is 0.37 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:49,215 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/2c98c0f69bd94adf8a325dcad1ddc4c0 is 175, key is test_row_0/A:col10/1732289028897/Put/seqid=0 2024-11-22T15:23:49,219 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 661abb5eb8be4eaf4f236a86a23909c7/B of 661abb5eb8be4eaf4f236a86a23909c7 into 8cae1fea5a764ee29526a007f89095f5(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:23:49,219 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:23:49,219 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7., storeName=661abb5eb8be4eaf4f236a86a23909c7/B, priority=13, startTime=1732289029106; duration=0sec 2024-11-22T15:23:49,220 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:23:49,220 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 661abb5eb8be4eaf4f236a86a23909c7:B 2024-11-22T15:23:49,220 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:23:49,221 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:23:49,221 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): 661abb5eb8be4eaf4f236a86a23909c7/C is initiating minor compaction (all files) 2024-11-22T15:23:49,221 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 661abb5eb8be4eaf4f236a86a23909c7/C in TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
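The records above show the region server flushing region 661abb5eb8be4eaf4f236a86a23909c7 and then queuing system-requested minor compactions for stores A, B and C, with the ExploringCompactionPolicy selecting all three store files per store and the PressureAwareThroughputController capping compaction throughput at 50.00 MB/second. As a minimal sketch only, the snippet below shows how the same kind of compaction can be requested and observed from a client through the standard HBase Admin API; the table and family names are taken from the log, while the connection setup, polling interval and class name are illustrative assumptions rather than anything the test itself does.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestCompactionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask for a compaction of column family A, analogous to the
      // system-requested minor compaction recorded in the log.
      admin.compact(table, Bytes.toBytes("A"));

      // Poll until the servers report no compaction in progress for the table.
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(500);
      }
      System.out.println("Compaction of " + table + " finished");
    }
  }
}

In the run above the compactions are triggered internally by MemStoreFlusher.0, so no client call is involved; the sketch only mirrors that behaviour for manual experimentation.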
2024-11-22T15:23:49,221 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/f1105958470f4c1c88b6a2f735816768, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/c0dc48514bc14bafa2ad90518fc61104, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/0edb27edb6144d858f6ab436fdf2035e] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp, totalSize=35.2 K 2024-11-22T15:23:49,221 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting f1105958470f4c1c88b6a2f735816768, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732289026734 2024-11-22T15:23:49,222 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting c0dc48514bc14bafa2ad90518fc61104, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732289026744 2024-11-22T15:23:49,222 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 0edb27edb6144d858f6ab436fdf2035e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732289027056 2024-11-22T15:23:49,236 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:49,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289089225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:49,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742234_1410 (size=12154) 2024-11-22T15:23:49,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:49,245 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:49,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289089236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:49,250 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411228214ff21ee8d4de88ed65be4f835777b_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411228214ff21ee8d4de88ed65be4f835777b_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:49,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/a5d3e0a80e8d48e196561411045c79db, store: [table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:23:49,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/a5d3e0a80e8d48e196561411045c79db is 175, key is test_row_0/A:col10/1732289028987/Put/seqid=0 2024-11-22T15:23:49,262 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:49,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1732289089249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:49,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742235_1411 (size=31058) 2024-11-22T15:23:49,272 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 661abb5eb8be4eaf4f236a86a23909c7#C#compaction#359 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:49,272 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/516c9502ef43401f8a75589b194f174c is 50, key is test_row_0/C:col10/1732289028897/Put/seqid=0 2024-11-22T15:23:49,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742236_1412 (size=30955) 2024-11-22T15:23:49,305 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=77, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/a5d3e0a80e8d48e196561411045c79db 2024-11-22T15:23:49,310 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:49,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289089305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:49,311 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:49,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289089306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:49,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742237_1413 (size=12104) 2024-11-22T15:23:49,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/55d4bc38e9d3440fb29ab3e4e408169f is 50, key is test_row_0/B:col10/1732289028987/Put/seqid=0 2024-11-22T15:23:49,342 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:49,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289089337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:49,352 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:49,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289089348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:49,372 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:49,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1732289089364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:49,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742238_1414 (size=12001) 2024-11-22T15:23:49,384 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/55d4bc38e9d3440fb29ab3e4e408169f 2024-11-22T15:23:49,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/6fff161c3f0f453d9e3f482c9095876a is 50, key is test_row_0/C:col10/1732289028987/Put/seqid=0 2024-11-22T15:23:49,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-22T15:23:49,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742239_1415 (size=12001) 2024-11-22T15:23:49,552 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:49,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289089545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:49,556 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:49,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289089553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:49,578 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:49,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1732289089573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:49,619 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:49,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289089614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:49,620 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:49,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289089614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:49,673 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/2c98c0f69bd94adf8a325dcad1ddc4c0 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/2c98c0f69bd94adf8a325dcad1ddc4c0 2024-11-22T15:23:49,680 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 661abb5eb8be4eaf4f236a86a23909c7/A of 661abb5eb8be4eaf4f236a86a23909c7 into 2c98c0f69bd94adf8a325dcad1ddc4c0(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
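Most of the records in this stretch are writes being rejected with RegionTooBusyException ("Over memstore limit=512.0 K") while the flush of region 661abb5eb8be4eaf4f236a86a23909c7 and the queued compactions catch up. The blocking limit enforced by HRegion.checkResources is, in general, the memstore flush size multiplied by the block multiplier (hbase.hregion.memstore.flush.size x hbase.hregion.memstore.block.multiplier); the 512.0 K figure here presumably reflects the deliberately small flush size this test configures. The sketch below illustrates one way an application might back off and retry such rejected puts; the row, family and qualifier are taken from the log, the retry count and sleep times are arbitrary, and note that the stock HBase client already retries these failures on its own, so depending on hbase.client.retries.number the exception may arrive wrapped in a RetriesExhaustedException rather than directly as assumed here.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {

      // Same shape of mutation as the rejected calls in the log.
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 100;                         // arbitrary starting backoff
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break;                                    // write accepted
        } catch (IOException e) {
          // Under memstore back-pressure this is a RegionTooBusyException,
          // possibly wrapped by the client's own retry layer.
          boolean busy = e instanceof RegionTooBusyException
              || e.getCause() instanceof RegionTooBusyException;
          if (!busy || attempt >= 10) {
            throw e;                                // not back-pressure, or out of attempts
          }
          Thread.sleep(backoffMs);                  // let the flush/compactions drain
          backoffMs = Math.min(backoffMs * 2, 5_000);
        }
      }
    }
  }
}

Backing off (rather than retrying immediately) matters here because the region only accepts writes again once the flush completing at sequenceid=77 brings memstore usage back under the blocking limit.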
2024-11-22T15:23:49,680 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:23:49,680 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7., storeName=661abb5eb8be4eaf4f236a86a23909c7/A, priority=13, startTime=1732289029106; duration=0sec 2024-11-22T15:23:49,680 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:49,680 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 661abb5eb8be4eaf4f236a86a23909c7:A 2024-11-22T15:23:49,734 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/516c9502ef43401f8a75589b194f174c as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/516c9502ef43401f8a75589b194f174c 2024-11-22T15:23:49,742 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 661abb5eb8be4eaf4f236a86a23909c7/C of 661abb5eb8be4eaf4f236a86a23909c7 into 516c9502ef43401f8a75589b194f174c(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:23:49,742 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:23:49,742 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7., storeName=661abb5eb8be4eaf4f236a86a23909c7/C, priority=13, startTime=1732289029106; duration=0sec 2024-11-22T15:23:49,742 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:49,742 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 661abb5eb8be4eaf4f236a86a23909c7:C 2024-11-22T15:23:49,840 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/6fff161c3f0f453d9e3f482c9095876a 2024-11-22T15:23:49,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/a5d3e0a80e8d48e196561411045c79db as 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/a5d3e0a80e8d48e196561411045c79db 2024-11-22T15:23:49,852 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/a5d3e0a80e8d48e196561411045c79db, entries=150, sequenceid=77, filesize=30.2 K 2024-11-22T15:23:49,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/55d4bc38e9d3440fb29ab3e4e408169f as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/55d4bc38e9d3440fb29ab3e4e408169f 2024-11-22T15:23:49,856 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/55d4bc38e9d3440fb29ab3e4e408169f, entries=150, sequenceid=77, filesize=11.7 K 2024-11-22T15:23:49,858 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:49,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289089855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:49,860 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:49,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/6fff161c3f0f453d9e3f482c9095876a as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/6fff161c3f0f453d9e3f482c9095876a 2024-11-22T15:23:49,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289089857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:49,865 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/6fff161c3f0f453d9e3f482c9095876a, entries=150, sequenceid=77, filesize=11.7 K 2024-11-22T15:23:49,866 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for 661abb5eb8be4eaf4f236a86a23909c7 in 724ms, sequenceid=77, compaction requested=false 2024-11-22T15:23:49,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2538): Flush status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:23:49,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
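The pid=105 FlushRegionProcedure that just completed is a subprocedure of the pid=104 FlushTableProcedure, and the surrounding log shows the test client requesting these table flushes (Client=jenkins//172.17.0.2 flush TestAcidGuarantees). A short sketch of how such a flush is typically requested through the Admin API, assuming an HBase 2.x client and the table name taken from the log; this mirrors the procedure pair seen here but is not the test's own code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; on the server side this shows up
      // as a FlushTableProcedure with FlushRegionProcedure subprocedures, as in the log above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}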
2024-11-22T15:23:49,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-11-22T15:23:49,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=105 2024-11-22T15:23:49,868 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=104 2024-11-22T15:23:49,868 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0360 sec 2024-11-22T15:23:49,869 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees in 1.0400 sec 2024-11-22T15:23:49,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on 661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:49,881 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 661abb5eb8be4eaf4f236a86a23909c7 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-22T15:23:49,881 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=A 2024-11-22T15:23:49,881 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:49,881 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=B 2024-11-22T15:23:49,881 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:49,881 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=C 2024-11-22T15:23:49,881 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:49,887 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122259797381eb64e6789ade616f226e251_661abb5eb8be4eaf4f236a86a23909c7 is 50, key is test_row_0/A:col10/1732289029234/Put/seqid=0 2024-11-22T15:23:49,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742240_1416 (size=14594) 2024-11-22T15:23:49,898 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:49,903 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122259797381eb64e6789ade616f226e251_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122259797381eb64e6789ade616f226e251_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:49,904 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/6bdf12d847ff40ec9dada906b9dd7bd4, store: [table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:23:49,904 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/6bdf12d847ff40ec9dada906b9dd7bd4 is 175, key is test_row_0/A:col10/1732289029234/Put/seqid=0 2024-11-22T15:23:49,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742241_1417 (size=39549) 2024-11-22T15:23:49,931 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=96, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/6bdf12d847ff40ec9dada906b9dd7bd4 2024-11-22T15:23:49,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-22T15:23:49,934 INFO [Thread-1775 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 104 completed 2024-11-22T15:23:49,935 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:23:49,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees 2024-11-22T15:23:49,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-22T15:23:49,936 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:23:49,937 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:23:49,937 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=107, ppid=106, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:23:49,944 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/55cd205341014586acd7019f2d0dc1a3 is 50, key is test_row_0/B:col10/1732289029234/Put/seqid=0 2024-11-22T15:23:49,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742242_1418 (size=12001) 2024-11-22T15:23:49,982 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore 
data size=29.07 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/55cd205341014586acd7019f2d0dc1a3 2024-11-22T15:23:49,990 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/da76b0310eb94db4892be2f6d9dfacd9 is 50, key is test_row_0/C:col10/1732289029234/Put/seqid=0 2024-11-22T15:23:50,009 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:50,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1732289090001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:50,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742243_1419 (size=12001) 2024-11-22T15:23:50,021 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/da76b0310eb94db4892be2f6d9dfacd9 2024-11-22T15:23:50,026 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/6bdf12d847ff40ec9dada906b9dd7bd4 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/6bdf12d847ff40ec9dada906b9dd7bd4 2024-11-22T15:23:50,033 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/6bdf12d847ff40ec9dada906b9dd7bd4, entries=200, sequenceid=96, filesize=38.6 K 2024-11-22T15:23:50,034 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/55cd205341014586acd7019f2d0dc1a3 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/55cd205341014586acd7019f2d0dc1a3 2024-11-22T15:23:50,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-22T15:23:50,038 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/55cd205341014586acd7019f2d0dc1a3, entries=150, sequenceid=96, filesize=11.7 K 2024-11-22T15:23:50,039 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/da76b0310eb94db4892be2f6d9dfacd9 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/da76b0310eb94db4892be2f6d9dfacd9 2024-11-22T15:23:50,049 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/da76b0310eb94db4892be2f6d9dfacd9, entries=150, sequenceid=96, filesize=11.7 K 2024-11-22T15:23:50,050 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 661abb5eb8be4eaf4f236a86a23909c7 in 169ms, sequenceid=96, compaction requested=true 2024-11-22T15:23:50,050 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:23:50,050 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:23:50,050 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 661abb5eb8be4eaf4f236a86a23909c7:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:23:50,050 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:50,050 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:23:50,051 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 661abb5eb8be4eaf4f236a86a23909c7:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:23:50,051 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:50,051 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 661abb5eb8be4eaf4f236a86a23909c7:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:23:50,051 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:23:50,052 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101562 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:23:50,052 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): 661abb5eb8be4eaf4f236a86a23909c7/A is initiating minor compaction (all files) 2024-11-22T15:23:50,052 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 661abb5eb8be4eaf4f236a86a23909c7/A in TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:50,052 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/2c98c0f69bd94adf8a325dcad1ddc4c0, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/a5d3e0a80e8d48e196561411045c79db, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/6bdf12d847ff40ec9dada906b9dd7bd4] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp, totalSize=99.2 K 2024-11-22T15:23:50,052 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:50,052 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
files: [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/2c98c0f69bd94adf8a325dcad1ddc4c0, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/a5d3e0a80e8d48e196561411045c79db, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/6bdf12d847ff40ec9dada906b9dd7bd4] 2024-11-22T15:23:50,054 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:23:50,054 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): 661abb5eb8be4eaf4f236a86a23909c7/B is initiating minor compaction (all files) 2024-11-22T15:23:50,054 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 661abb5eb8be4eaf4f236a86a23909c7/B in TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:50,054 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/8cae1fea5a764ee29526a007f89095f5, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/55d4bc38e9d3440fb29ab3e4e408169f, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/55cd205341014586acd7019f2d0dc1a3] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp, totalSize=35.3 K 2024-11-22T15:23:50,054 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2c98c0f69bd94adf8a325dcad1ddc4c0, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732289027056 2024-11-22T15:23:50,055 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 8cae1fea5a764ee29526a007f89095f5, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732289027056 2024-11-22T15:23:50,055 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting a5d3e0a80e8d48e196561411045c79db, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732289028970 2024-11-22T15:23:50,055 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 55d4bc38e9d3440fb29ab3e4e408169f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732289028970 2024-11-22T15:23:50,055 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6bdf12d847ff40ec9dada906b9dd7bd4, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1732289029220 2024-11-22T15:23:50,056 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 
55cd205341014586acd7019f2d0dc1a3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1732289029234 2024-11-22T15:23:50,085 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 661abb5eb8be4eaf4f236a86a23909c7#B#compaction#365 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:50,085 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/288c6883dcf7467a92542c825cedee60 is 50, key is test_row_0/B:col10/1732289029234/Put/seqid=0 2024-11-22T15:23:50,088 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:50,088 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-22T15:23:50,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:50,089 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2837): Flushing 661abb5eb8be4eaf4f236a86a23909c7 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-22T15:23:50,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=A 2024-11-22T15:23:50,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:50,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=B 2024-11-22T15:23:50,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:50,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=C 2024-11-22T15:23:50,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:50,093 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:23:50,115 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
as already flushing 2024-11-22T15:23:50,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on 661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:50,126 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241122bbdaa360c4284af1b7e9ba58f50c9e06_661abb5eb8be4eaf4f236a86a23909c7 store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:23:50,127 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241122bbdaa360c4284af1b7e9ba58f50c9e06_661abb5eb8be4eaf4f236a86a23909c7, store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:23:50,128 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122bbdaa360c4284af1b7e9ba58f50c9e06_661abb5eb8be4eaf4f236a86a23909c7 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:23:50,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742244_1420 (size=12207) 2024-11-22T15:23:50,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122d7a390d47e6b41678e783dba9875f82f_661abb5eb8be4eaf4f236a86a23909c7 is 50, key is test_row_0/A:col10/1732289029968/Put/seqid=0 2024-11-22T15:23:50,153 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/288c6883dcf7467a92542c825cedee60 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/288c6883dcf7467a92542c825cedee60 2024-11-22T15:23:50,158 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 661abb5eb8be4eaf4f236a86a23909c7/B of 661abb5eb8be4eaf4f236a86a23909c7 into 288c6883dcf7467a92542c825cedee60(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
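Column family A in this test is MOB-enabled: flushes rename d41d8cd...-prefixed files into mobdir via HMobStore, and DefaultMobStoreCompactor opens (and here aborts, because no MOB cells qualified) a MOB writer during compaction. A small sketch of how a MOB column family like this can be declared, assuming the HBase 2.x descriptor builders; the threshold value is an assumed example and not taken from the test.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilyExample {
  public static void main(String[] args) {
    // Column family 'A' with MOB storage enabled: cells larger than the threshold are written
    // to separate MOB files under mobdir instead of the ordinary store files.
    ColumnFamilyDescriptor mobFamily = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("A"))
        .setMobEnabled(true)
        .setMobThreshold(102400L) // 100 KB; an illustrative value, not the test's setting
        .build();
    TableDescriptor table = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setColumnFamily(mobFamily)
        .build();
    System.out.println(table);
  }
}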
2024-11-22T15:23:50,158 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:23:50,158 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7., storeName=661abb5eb8be4eaf4f236a86a23909c7/B, priority=13, startTime=1732289030050; duration=0sec 2024-11-22T15:23:50,158 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:23:50,159 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 661abb5eb8be4eaf4f236a86a23909c7:B 2024-11-22T15:23:50,159 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:23:50,162 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:23:50,162 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): 661abb5eb8be4eaf4f236a86a23909c7/C is initiating minor compaction (all files) 2024-11-22T15:23:50,162 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 661abb5eb8be4eaf4f236a86a23909c7/C in TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:50,162 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/516c9502ef43401f8a75589b194f174c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/6fff161c3f0f453d9e3f482c9095876a, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/da76b0310eb94db4892be2f6d9dfacd9] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp, totalSize=35.3 K 2024-11-22T15:23:50,163 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 516c9502ef43401f8a75589b194f174c, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732289027056 2024-11-22T15:23:50,163 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 6fff161c3f0f453d9e3f482c9095876a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732289028970 2024-11-22T15:23:50,163 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting da76b0310eb94db4892be2f6d9dfacd9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1732289029234 2024-11-22T15:23:50,175 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:50,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1732289090163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:50,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742245_1421 (size=4469) 2024-11-22T15:23:50,180 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 661abb5eb8be4eaf4f236a86a23909c7#A#compaction#366 average throughput is 0.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:50,181 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/d8fb5a451b66474bbb9ae5bd67836d1c is 175, key is test_row_0/A:col10/1732289029234/Put/seqid=0 2024-11-22T15:23:50,187 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:50,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289090175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:50,187 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:50,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289090176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:50,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742246_1422 (size=12154) 2024-11-22T15:23:50,197 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 661abb5eb8be4eaf4f236a86a23909c7#C#compaction#368 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:50,198 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/9b6a78dcab104426ab5604c5ef40cace is 50, key is test_row_0/C:col10/1732289029234/Put/seqid=0 2024-11-22T15:23:50,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742247_1423 (size=31161) 2024-11-22T15:23:50,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-22T15:23:50,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742248_1424 (size=12207) 2024-11-22T15:23:50,253 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/9b6a78dcab104426ab5604c5ef40cace as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/9b6a78dcab104426ab5604c5ef40cace 2024-11-22T15:23:50,263 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 661abb5eb8be4eaf4f236a86a23909c7/C of 661abb5eb8be4eaf4f236a86a23909c7 into 9b6a78dcab104426ab5604c5ef40cace(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:23:50,263 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:23:50,263 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7., storeName=661abb5eb8be4eaf4f236a86a23909c7/C, priority=13, startTime=1732289030051; duration=0sec 2024-11-22T15:23:50,263 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:50,265 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 661abb5eb8be4eaf4f236a86a23909c7:C 2024-11-22T15:23:50,283 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:50,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1732289090277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:50,292 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:50,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289090288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:50,292 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:50,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289090288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:50,364 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:50,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289090361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:50,368 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:50,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289090363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:50,489 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:50,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1732289090485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:50,496 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:50,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289090493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:50,497 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:50,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289090494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:50,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-22T15:23:50,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:50,594 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122d7a390d47e6b41678e783dba9875f82f_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122d7a390d47e6b41678e783dba9875f82f_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:50,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/7c8059c96f4e4976a581c57483153a08, store: [table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:23:50,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/7c8059c96f4e4976a581c57483153a08 is 175, key is test_row_0/A:col10/1732289029968/Put/seqid=0 2024-11-22T15:23:50,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742249_1425 (size=30955) 2024-11-22T15:23:50,617 INFO 
[RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=116, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/7c8059c96f4e4976a581c57483153a08 2024-11-22T15:23:50,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/0a4fbeecac3845c494896afe2c13c7b1 is 50, key is test_row_0/B:col10/1732289029968/Put/seqid=0 2024-11-22T15:23:50,631 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/d8fb5a451b66474bbb9ae5bd67836d1c as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/d8fb5a451b66474bbb9ae5bd67836d1c 2024-11-22T15:23:50,643 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 661abb5eb8be4eaf4f236a86a23909c7/A of 661abb5eb8be4eaf4f236a86a23909c7 into d8fb5a451b66474bbb9ae5bd67836d1c(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:23:50,643 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:23:50,643 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7., storeName=661abb5eb8be4eaf4f236a86a23909c7/A, priority=13, startTime=1732289030050; duration=0sec 2024-11-22T15:23:50,643 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:50,643 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 661abb5eb8be4eaf4f236a86a23909c7:A 2024-11-22T15:23:50,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742250_1426 (size=12001) 2024-11-22T15:23:50,644 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/0a4fbeecac3845c494896afe2c13c7b1 2024-11-22T15:23:50,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/fb57a54887f24e3eb7544bbab614585d is 
50, key is test_row_0/C:col10/1732289029968/Put/seqid=0 2024-11-22T15:23:50,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742251_1427 (size=12001) 2024-11-22T15:23:50,679 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/fb57a54887f24e3eb7544bbab614585d 2024-11-22T15:23:50,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/7c8059c96f4e4976a581c57483153a08 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/7c8059c96f4e4976a581c57483153a08 2024-11-22T15:23:50,689 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/7c8059c96f4e4976a581c57483153a08, entries=150, sequenceid=116, filesize=30.2 K 2024-11-22T15:23:50,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/0a4fbeecac3845c494896afe2c13c7b1 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/0a4fbeecac3845c494896afe2c13c7b1 2024-11-22T15:23:50,694 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/0a4fbeecac3845c494896afe2c13c7b1, entries=150, sequenceid=116, filesize=11.7 K 2024-11-22T15:23:50,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/fb57a54887f24e3eb7544bbab614585d as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/fb57a54887f24e3eb7544bbab614585d 2024-11-22T15:23:50,698 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/fb57a54887f24e3eb7544bbab614585d, entries=150, sequenceid=116, filesize=11.7 K 2024-11-22T15:23:50,699 INFO 
[RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=93.93 KB/96180 for 661abb5eb8be4eaf4f236a86a23909c7 in 611ms, sequenceid=116, compaction requested=false 2024-11-22T15:23:50,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2538): Flush status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:23:50,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:50,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=107 2024-11-22T15:23:50,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=107 2024-11-22T15:23:50,701 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=107, resume processing ppid=106 2024-11-22T15:23:50,701 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, ppid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 763 msec 2024-11-22T15:23:50,704 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees in 766 msec 2024-11-22T15:23:50,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on 661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:50,795 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 661abb5eb8be4eaf4f236a86a23909c7 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-22T15:23:50,795 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=A 2024-11-22T15:23:50,796 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:50,796 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=B 2024-11-22T15:23:50,796 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:50,796 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=C 2024-11-22T15:23:50,796 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:50,808 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122117fde7e417c4711a4036f103539b2f3_661abb5eb8be4eaf4f236a86a23909c7 is 50, key is test_row_0/A:col10/1732289030174/Put/seqid=0 2024-11-22T15:23:50,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742252_1428 (size=14744) 2024-11-22T15:23:50,836 DEBUG [MemStoreFlusher.0 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:50,845 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122117fde7e417c4711a4036f103539b2f3_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122117fde7e417c4711a4036f103539b2f3_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:50,846 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/5496e3153caf4475b8095302ee0e529b, store: [table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:23:50,846 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/5496e3153caf4475b8095302ee0e529b is 175, key is test_row_0/A:col10/1732289030174/Put/seqid=0 2024-11-22T15:23:50,855 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:50,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289090844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:50,855 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:50,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289090845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:50,866 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:50,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1732289090855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:50,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742253_1429 (size=39699) 2024-11-22T15:23:50,876 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=137, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/5496e3153caf4475b8095302ee0e529b 2024-11-22T15:23:50,883 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/7bfcc46591784f35812bcc7e7c144689 is 50, key is test_row_0/B:col10/1732289030174/Put/seqid=0 2024-11-22T15:23:50,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742254_1430 (size=12101) 2024-11-22T15:23:50,915 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/7bfcc46591784f35812bcc7e7c144689 2024-11-22T15:23:50,925 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/a42056b8b6af4254a0409da845577704 is 50, key is test_row_0/C:col10/1732289030174/Put/seqid=0 2024-11-22T15:23:50,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742255_1431 (size=12101) 2024-11-22T15:23:50,960 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/a42056b8b6af4254a0409da845577704 2024-11-22T15:23:50,964 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:50,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289090957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:50,964 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:50,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289090957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:50,968 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/5496e3153caf4475b8095302ee0e529b as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/5496e3153caf4475b8095302ee0e529b 2024-11-22T15:23:50,973 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/5496e3153caf4475b8095302ee0e529b, entries=200, sequenceid=137, filesize=38.8 K 2024-11-22T15:23:50,973 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:50,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1732289090967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:50,974 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/7bfcc46591784f35812bcc7e7c144689 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/7bfcc46591784f35812bcc7e7c144689 2024-11-22T15:23:50,978 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/7bfcc46591784f35812bcc7e7c144689, entries=150, sequenceid=137, filesize=11.8 K 2024-11-22T15:23:50,979 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/a42056b8b6af4254a0409da845577704 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/a42056b8b6af4254a0409da845577704 2024-11-22T15:23:50,983 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/a42056b8b6af4254a0409da845577704, entries=150, sequenceid=137, filesize=11.8 K 2024-11-22T15:23:50,984 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 661abb5eb8be4eaf4f236a86a23909c7 in 189ms, sequenceid=137, compaction requested=true 2024-11-22T15:23:50,984 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:23:50,984 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:23:50,984 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 661abb5eb8be4eaf4f236a86a23909c7:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:23:50,984 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:50,984 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:23:50,985 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101815 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:23:50,985 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): 661abb5eb8be4eaf4f236a86a23909c7/A is initiating minor compaction (all files) 2024-11-22T15:23:50,985 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 661abb5eb8be4eaf4f236a86a23909c7/A in TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:50,985 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/d8fb5a451b66474bbb9ae5bd67836d1c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/7c8059c96f4e4976a581c57483153a08, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/5496e3153caf4475b8095302ee0e529b] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp, totalSize=99.4 K 2024-11-22T15:23:50,985 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:50,985 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
files: [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/d8fb5a451b66474bbb9ae5bd67836d1c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/7c8059c96f4e4976a581c57483153a08, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/5496e3153caf4475b8095302ee0e529b] 2024-11-22T15:23:50,985 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 661abb5eb8be4eaf4f236a86a23909c7:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:23:50,985 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:50,985 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:23:50,985 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): 661abb5eb8be4eaf4f236a86a23909c7/B is initiating minor compaction (all files) 2024-11-22T15:23:50,986 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 661abb5eb8be4eaf4f236a86a23909c7/B in TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:50,986 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/288c6883dcf7467a92542c825cedee60, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/0a4fbeecac3845c494896afe2c13c7b1, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/7bfcc46591784f35812bcc7e7c144689] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp, totalSize=35.5 K 2024-11-22T15:23:50,986 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting d8fb5a451b66474bbb9ae5bd67836d1c, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1732289029234 2024-11-22T15:23:50,986 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 288c6883dcf7467a92542c825cedee60, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1732289029234 2024-11-22T15:23:50,986 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 0a4fbeecac3845c494896afe2c13c7b1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732289029968 2024-11-22T15:23:50,986 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7c8059c96f4e4976a581c57483153a08, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, 
seqNum=116, earliestPutTs=1732289029968 2024-11-22T15:23:50,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 661abb5eb8be4eaf4f236a86a23909c7:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:23:50,987 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 7bfcc46591784f35812bcc7e7c144689, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732289030153 2024-11-22T15:23:50,987 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5496e3153caf4475b8095302ee0e529b, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732289030153 2024-11-22T15:23:50,987 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:23:50,997 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 661abb5eb8be4eaf4f236a86a23909c7#B#compaction#374 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:50,998 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/1ff1e412c21b44dbbbc03e58b94014e9 is 50, key is test_row_0/B:col10/1732289030174/Put/seqid=0 2024-11-22T15:23:51,003 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:23:51,023 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411222b981f4b7841471c9fe25f2ace214f2c_661abb5eb8be4eaf4f236a86a23909c7 store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:23:51,024 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411222b981f4b7841471c9fe25f2ace214f2c_661abb5eb8be4eaf4f236a86a23909c7, store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:23:51,024 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411222b981f4b7841471c9fe25f2ace214f2c_661abb5eb8be4eaf4f236a86a23909c7 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:23:51,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-22T15:23:51,039 INFO [Thread-1775 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 106 completed 2024-11-22T15:23:51,040 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] 
master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:23:51,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees 2024-11-22T15:23:51,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-22T15:23:51,042 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:23:51,043 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:23:51,043 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:23:51,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742256_1432 (size=12409) 2024-11-22T15:23:51,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742257_1433 (size=4469) 2024-11-22T15:23:51,110 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 661abb5eb8be4eaf4f236a86a23909c7#A#compaction#375 average throughput is 0.23 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:51,111 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/abe312df5333471eadf4c807ea91335b is 175, key is test_row_0/A:col10/1732289030174/Put/seqid=0 2024-11-22T15:23:51,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742258_1434 (size=31363) 2024-11-22T15:23:51,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-22T15:23:51,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on 661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:51,174 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 661abb5eb8be4eaf4f236a86a23909c7 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-22T15:23:51,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=A 2024-11-22T15:23:51,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:51,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=B 2024-11-22T15:23:51,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:51,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=C 2024-11-22T15:23:51,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:51,193 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:51,193 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-22T15:23:51,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:51,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. as already flushing 2024-11-22T15:23:51,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
2024-11-22T15:23:51,194 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:51,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:51,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:51,195 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122fb3dbf5ea4f148cda2d1bfc5b5224996_661abb5eb8be4eaf4f236a86a23909c7 is 50, key is test_row_0/A:col10/1732289030844/Put/seqid=0 2024-11-22T15:23:51,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742259_1435 (size=17284) 2024-11-22T15:23:51,222 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:51,227 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122fb3dbf5ea4f148cda2d1bfc5b5224996_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122fb3dbf5ea4f148cda2d1bfc5b5224996_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:51,230 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:51,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1732289091219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:51,231 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/924b1f1eb5384dab8ecbead9181f09c3, store: [table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:23:51,232 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/924b1f1eb5384dab8ecbead9181f09c3 is 175, key is test_row_0/A:col10/1732289030844/Put/seqid=0 2024-11-22T15:23:51,240 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:51,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289091229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:51,241 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:51,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289091231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:51,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742260_1436 (size=48389) 2024-11-22T15:23:51,337 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:51,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1732289091332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:51,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-22T15:23:51,346 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:51,346 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-22T15:23:51,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:51,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. as already flushing 2024-11-22T15:23:51,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:51,346 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:51,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:51,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:51,348 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:51,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289091341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:51,348 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:51,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289091342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:51,377 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:51,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289091368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:51,378 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:51,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289091376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:51,471 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/1ff1e412c21b44dbbbc03e58b94014e9 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/1ff1e412c21b44dbbbc03e58b94014e9 2024-11-22T15:23:51,475 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 661abb5eb8be4eaf4f236a86a23909c7/B of 661abb5eb8be4eaf4f236a86a23909c7 into 1ff1e412c21b44dbbbc03e58b94014e9(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:23:51,475 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:23:51,475 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7., storeName=661abb5eb8be4eaf4f236a86a23909c7/B, priority=13, startTime=1732289030984; duration=0sec 2024-11-22T15:23:51,475 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:23:51,475 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 661abb5eb8be4eaf4f236a86a23909c7:B 2024-11-22T15:23:51,475 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:23:51,476 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:23:51,476 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): 661abb5eb8be4eaf4f236a86a23909c7/C is initiating minor compaction (all files) 2024-11-22T15:23:51,476 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 661abb5eb8be4eaf4f236a86a23909c7/C in TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:51,476 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/9b6a78dcab104426ab5604c5ef40cace, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/fb57a54887f24e3eb7544bbab614585d, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/a42056b8b6af4254a0409da845577704] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp, totalSize=35.5 K 2024-11-22T15:23:51,477 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 9b6a78dcab104426ab5604c5ef40cace, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1732289029234 2024-11-22T15:23:51,477 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting fb57a54887f24e3eb7544bbab614585d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732289029968 2024-11-22T15:23:51,477 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting a42056b8b6af4254a0409da845577704, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732289030153 2024-11-22T15:23:51,492 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
661abb5eb8be4eaf4f236a86a23909c7#C#compaction#377 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:51,493 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/5fd7dca0956944c9af204e0b19b0bcbc is 50, key is test_row_0/C:col10/1732289030174/Put/seqid=0 2024-11-22T15:23:51,499 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:51,500 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-22T15:23:51,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:51,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. as already flushing 2024-11-22T15:23:51,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:51,500 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:51,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:51,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:51,545 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:51,545 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/abe312df5333471eadf4c807ea91335b as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/abe312df5333471eadf4c807ea91335b 2024-11-22T15:23:51,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1732289091539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:51,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742261_1437 (size=12409) 2024-11-22T15:23:51,558 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 661abb5eb8be4eaf4f236a86a23909c7/A of 661abb5eb8be4eaf4f236a86a23909c7 into abe312df5333471eadf4c807ea91335b(size=30.6 K), total size for store is 30.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:23:51,558 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:23:51,558 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7., storeName=661abb5eb8be4eaf4f236a86a23909c7/A, priority=13, startTime=1732289030984; duration=0sec 2024-11-22T15:23:51,558 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:51,558 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 661abb5eb8be4eaf4f236a86a23909c7:A 2024-11-22T15:23:51,561 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:51,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289091550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:51,561 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:51,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289091550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:51,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-22T15:23:51,652 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:51,653 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-22T15:23:51,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:51,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. as already flushing 2024-11-22T15:23:51,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:51,653 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:51,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:51,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:51,675 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=157, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/924b1f1eb5384dab8ecbead9181f09c3 2024-11-22T15:23:51,691 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/16da4df195504d2082e0b4814767378e is 50, key is test_row_0/B:col10/1732289030844/Put/seqid=0 2024-11-22T15:23:51,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742262_1438 (size=12151) 2024-11-22T15:23:51,728 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/16da4df195504d2082e0b4814767378e 2024-11-22T15:23:51,744 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/ca93c43c73a74c9d96573f0bdf49ddcd is 50, key is test_row_0/C:col10/1732289030844/Put/seqid=0 2024-11-22T15:23:51,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742263_1439 (size=12151) 2024-11-22T15:23:51,811 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:51,811 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-22T15:23:51,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:51,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
as already flushing 2024-11-22T15:23:51,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:51,812 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:51,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:51,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:51,852 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:51,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1732289091847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:51,868 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:51,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289091864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:51,868 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:51,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289091865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:51,951 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/5fd7dca0956944c9af204e0b19b0bcbc as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/5fd7dca0956944c9af204e0b19b0bcbc 2024-11-22T15:23:51,957 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 661abb5eb8be4eaf4f236a86a23909c7/C of 661abb5eb8be4eaf4f236a86a23909c7 into 5fd7dca0956944c9af204e0b19b0bcbc(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:23:51,957 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:23:51,957 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7., storeName=661abb5eb8be4eaf4f236a86a23909c7/C, priority=13, startTime=1732289030986; duration=0sec 2024-11-22T15:23:51,957 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:51,957 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 661abb5eb8be4eaf4f236a86a23909c7:C 2024-11-22T15:23:51,964 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:51,964 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-22T15:23:51,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:51,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. as already flushing 2024-11-22T15:23:51,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:51,964 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:23:51,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:51,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:52,117 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:52,117 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-22T15:23:52,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:52,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. as already flushing 2024-11-22T15:23:52,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:52,118 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:52,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:52,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:23:52,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-22T15:23:52,190 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/ca93c43c73a74c9d96573f0bdf49ddcd 2024-11-22T15:23:52,198 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/924b1f1eb5384dab8ecbead9181f09c3 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/924b1f1eb5384dab8ecbead9181f09c3 2024-11-22T15:23:52,206 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/924b1f1eb5384dab8ecbead9181f09c3, entries=250, sequenceid=157, filesize=47.3 K 2024-11-22T15:23:52,207 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/16da4df195504d2082e0b4814767378e as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/16da4df195504d2082e0b4814767378e 2024-11-22T15:23:52,212 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/16da4df195504d2082e0b4814767378e, entries=150, sequenceid=157, filesize=11.9 K 2024-11-22T15:23:52,212 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/ca93c43c73a74c9d96573f0bdf49ddcd as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/ca93c43c73a74c9d96573f0bdf49ddcd 2024-11-22T15:23:52,218 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/ca93c43c73a74c9d96573f0bdf49ddcd, entries=150, sequenceid=157, filesize=11.9 K 2024-11-22T15:23:52,219 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 661abb5eb8be4eaf4f236a86a23909c7 in 1046ms, sequenceid=157, compaction requested=false 2024-11-22T15:23:52,219 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:23:52,269 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:52,269 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-22T15:23:52,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:52,270 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing 661abb5eb8be4eaf4f236a86a23909c7 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-22T15:23:52,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=A 2024-11-22T15:23:52,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:52,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=B 2024-11-22T15:23:52,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:52,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=C 2024-11-22T15:23:52,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:52,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122fcc988fa242f4f7a969d118f74d1ce05_661abb5eb8be4eaf4f236a86a23909c7 is 50, key is test_row_0/A:col10/1732289031203/Put/seqid=0 2024-11-22T15:23:52,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742264_1440 (size=12304) 2024-11-22T15:23:52,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on 661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:52,361 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. as already flushing 2024-11-22T15:23:52,466 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:52,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1732289092460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:52,466 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:52,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289092460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:52,469 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:52,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289092462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:52,573 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:52,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1732289092568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:52,573 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:52,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289092568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:52,576 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:52,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289092569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:52,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:52,736 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122fcc988fa242f4f7a969d118f74d1ce05_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122fcc988fa242f4f7a969d118f74d1ce05_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:52,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/26fe80c81218483ab556c76e43b8dfdf, store: [table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:23:52,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/26fe80c81218483ab556c76e43b8dfdf is 175, key is test_row_0/A:col10/1732289031203/Put/seqid=0 2024-11-22T15:23:52,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742265_1441 (size=31105) 2024-11-22T15:23:52,769 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=176, memsize=29.1 K, 
hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/26fe80c81218483ab556c76e43b8dfdf 2024-11-22T15:23:52,780 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:52,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1732289092776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:52,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/40fd8f6eb9104e838e613358d47d217d is 50, key is test_row_0/B:col10/1732289031203/Put/seqid=0 2024-11-22T15:23:52,780 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:52,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289092776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:52,781 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:52,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289092777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:52,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742266_1442 (size=12151) 2024-11-22T15:23:52,993 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-22T15:23:53,088 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:53,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289093082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:53,089 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:53,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1732289093083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:53,089 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:53,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289093083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:53,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-22T15:23:53,197 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=176 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/40fd8f6eb9104e838e613358d47d217d 2024-11-22T15:23:53,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/63afbe70dcb748e8abbe3c95e91f8908 is 50, key is test_row_0/C:col10/1732289031203/Put/seqid=0 2024-11-22T15:23:53,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742267_1443 (size=12151) 2024-11-22T15:23:53,241 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=176 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/63afbe70dcb748e8abbe3c95e91f8908 2024-11-22T15:23:53,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/26fe80c81218483ab556c76e43b8dfdf as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/26fe80c81218483ab556c76e43b8dfdf 2024-11-22T15:23:53,252 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/26fe80c81218483ab556c76e43b8dfdf, entries=150, sequenceid=176, filesize=30.4 K 2024-11-22T15:23:53,253 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/40fd8f6eb9104e838e613358d47d217d as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/40fd8f6eb9104e838e613358d47d217d 2024-11-22T15:23:53,261 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/40fd8f6eb9104e838e613358d47d217d, entries=150, sequenceid=176, filesize=11.9 K 2024-11-22T15:23:53,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/63afbe70dcb748e8abbe3c95e91f8908 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/63afbe70dcb748e8abbe3c95e91f8908 2024-11-22T15:23:53,268 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/63afbe70dcb748e8abbe3c95e91f8908, entries=150, sequenceid=176, filesize=11.9 K 2024-11-22T15:23:53,268 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 661abb5eb8be4eaf4f236a86a23909c7 in 999ms, sequenceid=176, compaction requested=true 2024-11-22T15:23:53,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:23:53,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
2024-11-22T15:23:53,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-11-22T15:23:53,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-11-22T15:23:53,271 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=108 2024-11-22T15:23:53,271 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2270 sec 2024-11-22T15:23:53,272 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees in 2.2310 sec 2024-11-22T15:23:53,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on 661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:53,401 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 661abb5eb8be4eaf4f236a86a23909c7 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-22T15:23:53,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=A 2024-11-22T15:23:53,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:53,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=B 2024-11-22T15:23:53,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:53,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=C 2024-11-22T15:23:53,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:53,417 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122734ac57d13c147f9b14c4639d8e5992d_661abb5eb8be4eaf4f236a86a23909c7 is 50, key is test_row_0/A:col10/1732289032460/Put/seqid=0 2024-11-22T15:23:53,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742268_1444 (size=17284) 2024-11-22T15:23:53,460 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:53,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289093458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:53,460 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:53,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289093459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:53,568 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:53,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289093561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:53,568 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:53,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289093561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:53,601 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:53,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289093592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:53,602 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:53,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1732289093592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:53,604 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:53,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289093593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:53,773 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:53,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289093769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:53,773 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:53,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289093769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:53,841 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:53,844 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122734ac57d13c147f9b14c4639d8e5992d_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122734ac57d13c147f9b14c4639d8e5992d_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:53,848 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/d08360cc57da4b69a5c6abdf97aa01b5, store: [table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:23:53,849 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/d08360cc57da4b69a5c6abdf97aa01b5 is 175, key is test_row_0/A:col10/1732289032460/Put/seqid=0 2024-11-22T15:23:53,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742269_1445 (size=48389) 2024-11-22T15:23:53,893 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=198, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/d08360cc57da4b69a5c6abdf97aa01b5 2024-11-22T15:23:53,907 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/09ae589b22a6476a86045f4ecff27a99 is 50, key is test_row_0/B:col10/1732289032460/Put/seqid=0 2024-11-22T15:23:53,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742270_1446 
(size=12151) 2024-11-22T15:23:53,933 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=198 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/09ae589b22a6476a86045f4ecff27a99 2024-11-22T15:23:53,956 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/1c861e94d9654997875a5dc1759db1ed is 50, key is test_row_0/C:col10/1732289032460/Put/seqid=0 2024-11-22T15:23:53,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742271_1447 (size=12151) 2024-11-22T15:23:53,990 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=198 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/1c861e94d9654997875a5dc1759db1ed 2024-11-22T15:23:53,999 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/d08360cc57da4b69a5c6abdf97aa01b5 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/d08360cc57da4b69a5c6abdf97aa01b5 2024-11-22T15:23:54,010 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/d08360cc57da4b69a5c6abdf97aa01b5, entries=250, sequenceid=198, filesize=47.3 K 2024-11-22T15:23:54,011 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/09ae589b22a6476a86045f4ecff27a99 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/09ae589b22a6476a86045f4ecff27a99 2024-11-22T15:23:54,019 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/09ae589b22a6476a86045f4ecff27a99, entries=150, sequenceid=198, filesize=11.9 K 2024-11-22T15:23:54,020 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/1c861e94d9654997875a5dc1759db1ed as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/1c861e94d9654997875a5dc1759db1ed 2024-11-22T15:23:54,027 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/1c861e94d9654997875a5dc1759db1ed, entries=150, sequenceid=198, filesize=11.9 K 2024-11-22T15:23:54,028 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 661abb5eb8be4eaf4f236a86a23909c7 in 627ms, sequenceid=198, compaction requested=true 2024-11-22T15:23:54,028 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:23:54,029 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T15:23:54,031 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 159246 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T15:23:54,031 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): 661abb5eb8be4eaf4f236a86a23909c7/A is initiating minor compaction (all files) 2024-11-22T15:23:54,031 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 661abb5eb8be4eaf4f236a86a23909c7/A in TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:54,031 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/abe312df5333471eadf4c807ea91335b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/924b1f1eb5384dab8ecbead9181f09c3, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/26fe80c81218483ab556c76e43b8dfdf, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/d08360cc57da4b69a5c6abdf97aa01b5] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp, totalSize=155.5 K 2024-11-22T15:23:54,031 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:54,031 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
files: [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/abe312df5333471eadf4c807ea91335b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/924b1f1eb5384dab8ecbead9181f09c3, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/26fe80c81218483ab556c76e43b8dfdf, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/d08360cc57da4b69a5c6abdf97aa01b5] 2024-11-22T15:23:54,032 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting abe312df5333471eadf4c807ea91335b, keycount=150, bloomtype=ROW, size=30.6 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732289030153 2024-11-22T15:23:54,032 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 924b1f1eb5384dab8ecbead9181f09c3, keycount=250, bloomtype=ROW, size=47.3 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732289030840 2024-11-22T15:23:54,032 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 26fe80c81218483ab556c76e43b8dfdf, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1732289031203 2024-11-22T15:23:54,033 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting d08360cc57da4b69a5c6abdf97aa01b5, keycount=250, bloomtype=ROW, size=47.3 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732289032414 2024-11-22T15:23:54,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 661abb5eb8be4eaf4f236a86a23909c7:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:23:54,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:54,037 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T15:23:54,038 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 661abb5eb8be4eaf4f236a86a23909c7:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:23:54,038 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:54,038 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 661abb5eb8be4eaf4f236a86a23909c7:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:23:54,038 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:23:54,038 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48862 starting at candidate #0 after considering 3 permutations with 3 in ratio 
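Editor's note: the sequence recorded above (a memstore flush of region 661abb5eb8be4eaf4f236a86a23909c7 followed by minor compaction selection for stores A, B and C) can also be driven explicitly from a client. Below is a minimal sketch using the public HBase Admin API, assuming a reachable cluster and an hbase-site.xml on the classpath; the table name TestAcidGuarantees is taken from the log, while the class name, connection setup and timing are illustrative and are not part of the recorded test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushAndCompactSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();           // reads hbase-site.xml from the classpath
            TableName table = TableName.valueOf("TestAcidGuarantees");  // table name as it appears in the log
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                admin.flush(table);   // flush memstores to HFiles, as MemStoreFlusher does in the entries above
                admin.compact(table); // request a (minor) compaction of the flushed store files
            }
        }
    }
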
2024-11-22T15:23:54,038 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): 661abb5eb8be4eaf4f236a86a23909c7/B is initiating minor compaction (all files) 2024-11-22T15:23:54,038 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 661abb5eb8be4eaf4f236a86a23909c7/B in TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:54,039 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/1ff1e412c21b44dbbbc03e58b94014e9, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/16da4df195504d2082e0b4814767378e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/40fd8f6eb9104e838e613358d47d217d, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/09ae589b22a6476a86045f4ecff27a99] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp, totalSize=47.7 K 2024-11-22T15:23:54,039 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 1ff1e412c21b44dbbbc03e58b94014e9, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732289030153 2024-11-22T15:23:54,041 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 16da4df195504d2082e0b4814767378e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732289030844 2024-11-22T15:23:54,041 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 40fd8f6eb9104e838e613358d47d217d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1732289031203 2024-11-22T15:23:54,043 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 09ae589b22a6476a86045f4ecff27a99, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732289032460 2024-11-22T15:23:54,052 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:23:54,072 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 661abb5eb8be4eaf4f236a86a23909c7#B#compaction#387 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:54,072 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/1d11d9c819f348c3b365aa8045c3f82c is 50, key is test_row_0/B:col10/1732289032460/Put/seqid=0 2024-11-22T15:23:54,087 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411222f57a0727a5a406e9bf93235773df2f3_661abb5eb8be4eaf4f236a86a23909c7 store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:23:54,089 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411222f57a0727a5a406e9bf93235773df2f3_661abb5eb8be4eaf4f236a86a23909c7, store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:23:54,089 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411222f57a0727a5a406e9bf93235773df2f3_661abb5eb8be4eaf4f236a86a23909c7 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:23:54,094 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 661abb5eb8be4eaf4f236a86a23909c7 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-22T15:23:54,094 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=A 2024-11-22T15:23:54,094 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:54,094 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=B 2024-11-22T15:23:54,094 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:54,094 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=C 2024-11-22T15:23:54,094 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:54,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on 661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:54,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742272_1448 (size=12595) 2024-11-22T15:23:54,155 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411221669961546aa482c9a49d7fab3d080b9_661abb5eb8be4eaf4f236a86a23909c7 is 50, key is test_row_0/A:col10/1732289033457/Put/seqid=0 2024-11-22T15:23:54,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742273_1449 (size=4469) 2024-11-22T15:23:54,171 INFO 
[RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 661abb5eb8be4eaf4f236a86a23909c7#A#compaction#386 average throughput is 0.21 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:54,172 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/dcc5db6081db418d86fe46f41d0d77d4 is 175, key is test_row_0/A:col10/1732289032460/Put/seqid=0 2024-11-22T15:23:54,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742274_1450 (size=12304) 2024-11-22T15:23:54,177 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:54,181 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411221669961546aa482c9a49d7fab3d080b9_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411221669961546aa482c9a49d7fab3d080b9_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:54,182 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/d8db175f7ce74feda7a74e089b3880fc, store: [table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:23:54,182 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/d8db175f7ce74feda7a74e089b3880fc is 175, key is test_row_0/A:col10/1732289033457/Put/seqid=0 2024-11-22T15:23:54,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742276_1452 (size=31105) 2024-11-22T15:23:54,226 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=214, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/d8db175f7ce74feda7a74e089b3880fc 2024-11-22T15:23:54,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742275_1451 (size=31549) 2024-11-22T15:23:54,237 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/dcc5db6081db418d86fe46f41d0d77d4 as 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/dcc5db6081db418d86fe46f41d0d77d4 2024-11-22T15:23:54,242 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 661abb5eb8be4eaf4f236a86a23909c7/A of 661abb5eb8be4eaf4f236a86a23909c7 into dcc5db6081db418d86fe46f41d0d77d4(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:23:54,242 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:23:54,242 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7., storeName=661abb5eb8be4eaf4f236a86a23909c7/A, priority=12, startTime=1732289034029; duration=0sec 2024-11-22T15:23:54,242 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:23:54,242 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 661abb5eb8be4eaf4f236a86a23909c7:A 2024-11-22T15:23:54,242 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T15:23:54,243 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48862 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T15:23:54,243 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): 661abb5eb8be4eaf4f236a86a23909c7/C is initiating minor compaction (all files) 2024-11-22T15:23:54,243 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 661abb5eb8be4eaf4f236a86a23909c7/C in TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
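Editor's note: the repeated RegionTooBusyException entries ("Over memstore limit=512.0 K") indicate the region blocking writes once its memstore exceeds the blocking threshold, which in HBase is the per-region flush size multiplied by the block multiplier. The values actually used by this test are not shown in the log; the sketch below only names the two standard properties involved, with illustrative values chosen so that the product matches the 512 K limit reported above.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimitSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Illustrative values only; the test's real settings are not visible in this log.
            conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024); // per-region flush threshold (bytes)
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // writes block at flush.size * multiplier

            long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
            int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
            long blockingLimit = flushSize * multiplier; // 128 K * 4 = 512 K, matching the limit in the log
            System.out.println("Writes block above roughly " + blockingLimit + " bytes of memstore");
        }
    }
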
2024-11-22T15:23:54,243 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/5fd7dca0956944c9af204e0b19b0bcbc, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/ca93c43c73a74c9d96573f0bdf49ddcd, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/63afbe70dcb748e8abbe3c95e91f8908, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/1c861e94d9654997875a5dc1759db1ed] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp, totalSize=47.7 K 2024-11-22T15:23:54,243 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5fd7dca0956944c9af204e0b19b0bcbc, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732289030153 2024-11-22T15:23:54,244 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting ca93c43c73a74c9d96573f0bdf49ddcd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732289030844 2024-11-22T15:23:54,244 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 63afbe70dcb748e8abbe3c95e91f8908, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1732289031203 2024-11-22T15:23:54,244 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1c861e94d9654997875a5dc1759db1ed, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732289032460 2024-11-22T15:23:54,246 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/a0de90f391654baf86d2b5d52b46a6c7 is 50, key is test_row_0/B:col10/1732289033457/Put/seqid=0 2024-11-22T15:23:54,253 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:54,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289094237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:54,253 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:54,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289094238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:54,261 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 661abb5eb8be4eaf4f236a86a23909c7#C#compaction#390 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:54,262 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/42e658ac4a8b40bbac4c4ae574d18aea is 50, key is test_row_0/C:col10/1732289032460/Put/seqid=0 2024-11-22T15:23:54,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742277_1453 (size=12151) 2024-11-22T15:23:54,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742278_1454 (size=12595) 2024-11-22T15:23:54,311 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/42e658ac4a8b40bbac4c4ae574d18aea as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/42e658ac4a8b40bbac4c4ae574d18aea 2024-11-22T15:23:54,321 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 661abb5eb8be4eaf4f236a86a23909c7/C of 661abb5eb8be4eaf4f236a86a23909c7 into 42e658ac4a8b40bbac4c4ae574d18aea(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:23:54,322 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:23:54,322 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7., storeName=661abb5eb8be4eaf4f236a86a23909c7/C, priority=12, startTime=1732289034038; duration=0sec 2024-11-22T15:23:54,322 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:54,322 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 661abb5eb8be4eaf4f236a86a23909c7:C 2024-11-22T15:23:54,358 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:54,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289094354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:54,362 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:54,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289094355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:54,550 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/1d11d9c819f348c3b365aa8045c3f82c as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/1d11d9c819f348c3b365aa8045c3f82c 2024-11-22T15:23:54,558 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 661abb5eb8be4eaf4f236a86a23909c7/B of 661abb5eb8be4eaf4f236a86a23909c7 into 1d11d9c819f348c3b365aa8045c3f82c(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:23:54,558 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:23:54,558 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7., storeName=661abb5eb8be4eaf4f236a86a23909c7/B, priority=12, startTime=1732289034037; duration=0sec 2024-11-22T15:23:54,558 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:54,558 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 661abb5eb8be4eaf4f236a86a23909c7:B 2024-11-22T15:23:54,566 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:54,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289094560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:54,571 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:54,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289094565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:54,610 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:54,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1732289094604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:54,614 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:54,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289094607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:54,614 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:54,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289094609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:54,689 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/a0de90f391654baf86d2b5d52b46a6c7 2024-11-22T15:23:54,701 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/5f1e85641d624ee0b2d5b2c8afbd401f is 50, key is test_row_0/C:col10/1732289033457/Put/seqid=0 2024-11-22T15:23:54,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742279_1455 (size=12151) 2024-11-22T15:23:54,739 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/5f1e85641d624ee0b2d5b2c8afbd401f 2024-11-22T15:23:54,745 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/d8db175f7ce74feda7a74e089b3880fc as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/d8db175f7ce74feda7a74e089b3880fc 2024-11-22T15:23:54,757 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/d8db175f7ce74feda7a74e089b3880fc, entries=150, sequenceid=214, filesize=30.4 K 2024-11-22T15:23:54,759 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/a0de90f391654baf86d2b5d52b46a6c7 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/a0de90f391654baf86d2b5d52b46a6c7 2024-11-22T15:23:54,763 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/a0de90f391654baf86d2b5d52b46a6c7, entries=150, sequenceid=214, filesize=11.9 K 2024-11-22T15:23:54,765 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/5f1e85641d624ee0b2d5b2c8afbd401f as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/5f1e85641d624ee0b2d5b2c8afbd401f 2024-11-22T15:23:54,773 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/5f1e85641d624ee0b2d5b2c8afbd401f, entries=150, sequenceid=214, filesize=11.9 K 2024-11-22T15:23:54,774 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 661abb5eb8be4eaf4f236a86a23909c7 in 680ms, sequenceid=214, compaction requested=false 2024-11-22T15:23:54,774 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:23:54,875 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 661abb5eb8be4eaf4f236a86a23909c7 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-22T15:23:54,876 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=A 2024-11-22T15:23:54,876 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:54,876 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=B 2024-11-22T15:23:54,876 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:54,876 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=C 2024-11-22T15:23:54,876 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:54,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on 661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:54,904 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411229dbb61bc9cfe48fba4ea9451be2731c9_661abb5eb8be4eaf4f236a86a23909c7 is 50, key is test_row_0/A:col10/1732289034874/Put/seqid=0 2024-11-22T15:23:54,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742280_1456 (size=14794) 2024-11-22T15:23:54,980 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:54,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289094971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:54,981 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:54,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289094971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:55,092 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:55,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289095082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:55,092 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:55,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289095082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:55,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-22T15:23:55,147 INFO [Thread-1775 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 108 completed 2024-11-22T15:23:55,148 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:23:55,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees 2024-11-22T15:23:55,150 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:23:55,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-22T15:23:55,150 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:23:55,150 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:23:55,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-22T15:23:55,298 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:55,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289095295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:55,299 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:55,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289095295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:55,301 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:55,302 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-22T15:23:55,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:55,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. as already flushing 2024-11-22T15:23:55,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:55,303 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:23:55,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:55,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:55,344 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:55,349 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411229dbb61bc9cfe48fba4ea9451be2731c9_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411229dbb61bc9cfe48fba4ea9451be2731c9_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:55,350 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/ab0553a98012439c92dd7e36cbf593f9, store: [table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:23:55,351 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/ab0553a98012439c92dd7e36cbf593f9 is 175, key is test_row_0/A:col10/1732289034874/Put/seqid=0 2024-11-22T15:23:55,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742281_1457 (size=39749) 2024-11-22T15:23:55,392 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=238, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/ab0553a98012439c92dd7e36cbf593f9 2024-11-22T15:23:55,404 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/3a8a108a6e814a649535b3f112efeebd is 50, key is test_row_0/B:col10/1732289034874/Put/seqid=0 2024-11-22T15:23:55,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742282_1458 (size=12151) 2024-11-22T15:23:55,418 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/3a8a108a6e814a649535b3f112efeebd 2024-11-22T15:23:55,444 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/0a3ea6d1dca642f6b13a58d9e3ddc440 is 50, key is test_row_0/C:col10/1732289034874/Put/seqid=0 
2024-11-22T15:23:55,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-22T15:23:55,455 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:55,456 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-22T15:23:55,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:55,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. as already flushing 2024-11-22T15:23:55,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:55,457 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:55,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:55,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:23:55,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742283_1459 (size=12151) 2024-11-22T15:23:55,488 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/0a3ea6d1dca642f6b13a58d9e3ddc440 2024-11-22T15:23:55,494 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/ab0553a98012439c92dd7e36cbf593f9 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/ab0553a98012439c92dd7e36cbf593f9 2024-11-22T15:23:55,502 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/ab0553a98012439c92dd7e36cbf593f9, entries=200, sequenceid=238, filesize=38.8 K 2024-11-22T15:23:55,503 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/3a8a108a6e814a649535b3f112efeebd as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/3a8a108a6e814a649535b3f112efeebd 2024-11-22T15:23:55,509 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/3a8a108a6e814a649535b3f112efeebd, entries=150, sequenceid=238, filesize=11.9 K 2024-11-22T15:23:55,512 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/0a3ea6d1dca642f6b13a58d9e3ddc440 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/0a3ea6d1dca642f6b13a58d9e3ddc440 2024-11-22T15:23:55,518 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/0a3ea6d1dca642f6b13a58d9e3ddc440, entries=150, sequenceid=238, filesize=11.9 K 2024-11-22T15:23:55,519 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for 661abb5eb8be4eaf4f236a86a23909c7 in 644ms, sequenceid=238, compaction requested=true 2024-11-22T15:23:55,519 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:23:55,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 661abb5eb8be4eaf4f236a86a23909c7:A, priority=-2147483648, current under compaction 
store size is 1 2024-11-22T15:23:55,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:55,519 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:23:55,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 661abb5eb8be4eaf4f236a86a23909c7:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:23:55,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:55,519 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:23:55,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 661abb5eb8be4eaf4f236a86a23909c7:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:23:55,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:23:55,520 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102403 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:23:55,520 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:23:55,520 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): 661abb5eb8be4eaf4f236a86a23909c7/B is initiating minor compaction (all files) 2024-11-22T15:23:55,520 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): 661abb5eb8be4eaf4f236a86a23909c7/A is initiating minor compaction (all files) 2024-11-22T15:23:55,520 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 661abb5eb8be4eaf4f236a86a23909c7/A in TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:55,520 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 661abb5eb8be4eaf4f236a86a23909c7/B in TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
2024-11-22T15:23:55,520 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/1d11d9c819f348c3b365aa8045c3f82c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/a0de90f391654baf86d2b5d52b46a6c7, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/3a8a108a6e814a649535b3f112efeebd] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp, totalSize=36.0 K 2024-11-22T15:23:55,520 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/dcc5db6081db418d86fe46f41d0d77d4, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/d8db175f7ce74feda7a74e089b3880fc, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/ab0553a98012439c92dd7e36cbf593f9] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp, totalSize=100.0 K 2024-11-22T15:23:55,521 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:55,521 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
files: [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/dcc5db6081db418d86fe46f41d0d77d4, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/d8db175f7ce74feda7a74e089b3880fc, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/ab0553a98012439c92dd7e36cbf593f9] 2024-11-22T15:23:55,521 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 1d11d9c819f348c3b365aa8045c3f82c, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732289032460 2024-11-22T15:23:55,521 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting a0de90f391654baf86d2b5d52b46a6c7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732289033457 2024-11-22T15:23:55,521 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 3a8a108a6e814a649535b3f112efeebd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1732289034213 2024-11-22T15:23:55,522 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting dcc5db6081db418d86fe46f41d0d77d4, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732289032460 2024-11-22T15:23:55,522 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting d8db175f7ce74feda7a74e089b3880fc, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732289033457 2024-11-22T15:23:55,525 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting ab0553a98012439c92dd7e36cbf593f9, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1732289034213 2024-11-22T15:23:55,532 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 661abb5eb8be4eaf4f236a86a23909c7#B#compaction#395 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:55,533 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/27744129772d44048dcda2f383347546 is 50, key is test_row_0/B:col10/1732289034874/Put/seqid=0 2024-11-22T15:23:55,547 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:23:55,571 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241122461ced19c4b945d785c092c6b477dd6e_661abb5eb8be4eaf4f236a86a23909c7 store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:23:55,573 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241122461ced19c4b945d785c092c6b477dd6e_661abb5eb8be4eaf4f236a86a23909c7, store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:23:55,573 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122461ced19c4b945d785c092c6b477dd6e_661abb5eb8be4eaf4f236a86a23909c7 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:23:55,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742284_1460 (size=12697) 2024-11-22T15:23:55,601 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/27744129772d44048dcda2f383347546 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/27744129772d44048dcda2f383347546 2024-11-22T15:23:55,605 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 661abb5eb8be4eaf4f236a86a23909c7 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-22T15:23:55,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=A 2024-11-22T15:23:55,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:55,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=B 2024-11-22T15:23:55,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:55,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=C 2024-11-22T15:23:55,606 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:55,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on 661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:55,609 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 661abb5eb8be4eaf4f236a86a23909c7/B of 661abb5eb8be4eaf4f236a86a23909c7 into 27744129772d44048dcda2f383347546(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:23:55,609 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:23:55,609 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7., storeName=661abb5eb8be4eaf4f236a86a23909c7/B, priority=13, startTime=1732289035519; duration=0sec 2024-11-22T15:23:55,609 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:23:55,609 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 661abb5eb8be4eaf4f236a86a23909c7:B 2024-11-22T15:23:55,609 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:23:55,611 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:23:55,611 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): 661abb5eb8be4eaf4f236a86a23909c7/C is initiating minor compaction (all files) 2024-11-22T15:23:55,611 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 661abb5eb8be4eaf4f236a86a23909c7/C in TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
2024-11-22T15:23:55,611 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/42e658ac4a8b40bbac4c4ae574d18aea, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/5f1e85641d624ee0b2d5b2c8afbd401f, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/0a3ea6d1dca642f6b13a58d9e3ddc440] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp, totalSize=36.0 K 2024-11-22T15:23:55,612 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:55,612 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-22T15:23:55,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:55,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. as already flushing 2024-11-22T15:23:55,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:55,613 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:23:55,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:55,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:55,619 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 42e658ac4a8b40bbac4c4ae574d18aea, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732289032460 2024-11-22T15:23:55,619 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 5f1e85641d624ee0b2d5b2c8afbd401f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732289033457 2024-11-22T15:23:55,620 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 0a3ea6d1dca642f6b13a58d9e3ddc440, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1732289034213 2024-11-22T15:23:55,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742285_1461 (size=4469) 2024-11-22T15:23:55,637 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122041e8dc911e2494c93b765caabb85a2d_661abb5eb8be4eaf4f236a86a23909c7 is 50, key is test_row_0/A:col10/1732289034960/Put/seqid=0 2024-11-22T15:23:55,644 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 661abb5eb8be4eaf4f236a86a23909c7#C#compaction#398 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:55,645 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/ab0f1e0e5f404617bf592eec7135fd58 is 50, key is test_row_0/C:col10/1732289034874/Put/seqid=0 2024-11-22T15:23:55,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742287_1463 (size=12697) 2024-11-22T15:23:55,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742286_1462 (size=14794) 2024-11-22T15:23:55,662 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:55,665 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/ab0f1e0e5f404617bf592eec7135fd58 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/ab0f1e0e5f404617bf592eec7135fd58 2024-11-22T15:23:55,669 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122041e8dc911e2494c93b765caabb85a2d_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122041e8dc911e2494c93b765caabb85a2d_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:55,673 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/b9e9071e701e4e619b62813a31b74114, store: [table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:23:55,673 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/b9e9071e701e4e619b62813a31b74114 is 175, key is test_row_0/A:col10/1732289034960/Put/seqid=0 2024-11-22T15:23:55,678 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 661abb5eb8be4eaf4f236a86a23909c7/C of 661abb5eb8be4eaf4f236a86a23909c7 into ab0f1e0e5f404617bf592eec7135fd58(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
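The long-compactions thread has just rewritten three C-family store files into ab0f1e0e5f404617bf592eec7135fd58 (12.4 K). Compactions like this are selected automatically, but an equivalent request can be issued explicitly; a minimal sketch against the Admin API, with the table and family names taken from the log and everything else assumed:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class CompactFamilySketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Queue a compaction of the C family only; majorCompact(...) would instead force
      // all of the family's store files into a single file.
      admin.compact(TableName.valueOf("TestAcidGuarantees"), Bytes.toBytes("C"));
    }
  }
}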
2024-11-22T15:23:55,678 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:23:55,678 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7., storeName=661abb5eb8be4eaf4f236a86a23909c7/C, priority=13, startTime=1732289035519; duration=0sec 2024-11-22T15:23:55,678 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:55,678 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 661abb5eb8be4eaf4f236a86a23909c7:C 2024-11-22T15:23:55,678 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:55,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289095671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:55,687 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:55,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289095678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:55,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742288_1464 (size=39749) 2024-11-22T15:23:55,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-22T15:23:55,765 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:55,765 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-22T15:23:55,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:55,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. as already flushing 2024-11-22T15:23:55,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:55,766 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:55,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:55,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:55,786 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:55,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289095780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:55,795 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:55,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289095789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:55,920 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:55,920 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-22T15:23:55,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:55,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. as already flushing 2024-11-22T15:23:55,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:55,921 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
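The RegionTooBusyException warnings above ("Over memstore limit=512.0 K") mean the region rejects further puts until the in-flight flush drains its memstore; the HBase client normally retries these internally. A hand-rolled retry with exponential backoff would look roughly like this sketch (row, family and qualifier mirror the test_row_0 puts in the log; the retry policy itself is an assumption, not the test's code, and in practice the exception may surface wrapped in the client's own retry exception):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break;                       // write accepted
        } catch (RegionTooBusyException e) {
          if (attempt >= 5) throw e;   // give up after a few attempts
          Thread.sleep(backoffMs);     // give the flush time to free memstore space
          backoffMs *= 2;              // exponential backoff
        }
      }
    }
  }
}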
2024-11-22T15:23:55,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:55,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:55,990 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:55,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289095988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:56,002 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:56,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289095997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:56,026 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 661abb5eb8be4eaf4f236a86a23909c7#A#compaction#396 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:56,026 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/25c8a13fddc54164b3481520d121af60 is 175, key is test_row_0/A:col10/1732289034874/Put/seqid=0 2024-11-22T15:23:56,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742289_1465 (size=31651) 2024-11-22T15:23:56,066 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/25c8a13fddc54164b3481520d121af60 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/25c8a13fddc54164b3481520d121af60 2024-11-22T15:23:56,073 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:56,073 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-22T15:23:56,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:56,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. as already flushing 2024-11-22T15:23:56,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
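The 512.0 K blocking threshold in those warnings is the per-region memstore flush size multiplied by the block multiplier in the region server's resource check; the test presumably runs with a much smaller flush size than the default so that the limit is hit quickly. A sketch of the two configuration knobs involved, with illustrative values only (128 K * 4 = 512 K matches the limit in the log, but the test's real settings are not shown here):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Per-region memstore size (bytes) that triggers a flush.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
    // Writes are rejected with RegionTooBusyException once the memstore reaches
    // flush.size * block.multiplier.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println(conf.getLong("hbase.hregion.memstore.flush.size", -1L));
  }
}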
2024-11-22T15:23:56,073 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:56,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:56,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:56,076 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 661abb5eb8be4eaf4f236a86a23909c7/A of 661abb5eb8be4eaf4f236a86a23909c7 into 25c8a13fddc54164b3481520d121af60(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
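With this entry both pending compactions (family C earlier, family A here) have completed. When a test or client drives compactions itself, completion is usually detected by polling the table's compaction state; a minimal sketch, assuming the 2.x Admin API (the sleep interval and the choice of majorCompact are arbitrary):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class WaitForCompactionSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.majorCompact(table);                                  // request a full rewrite of every store
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(1000);                                       // MINOR/MAJOR means work is still queued or running
      }
    }
  }
}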
2024-11-22T15:23:56,076 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:23:56,076 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7., storeName=661abb5eb8be4eaf4f236a86a23909c7/A, priority=13, startTime=1732289035519; duration=0sec 2024-11-22T15:23:56,077 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:56,077 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 661abb5eb8be4eaf4f236a86a23909c7:A 2024-11-22T15:23:56,114 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=255, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/b9e9071e701e4e619b62813a31b74114 2024-11-22T15:23:56,130 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/73b55fb0701b48f3a184b8d29ac4dbed is 50, key is test_row_0/B:col10/1732289034960/Put/seqid=0 2024-11-22T15:23:56,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742290_1466 (size=12151) 2024-11-22T15:23:56,157 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/73b55fb0701b48f3a184b8d29ac4dbed 2024-11-22T15:23:56,180 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/092ca142a40c49b68f29e233d75eed48 is 50, key is test_row_0/C:col10/1732289034960/Put/seqid=0 2024-11-22T15:23:56,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742291_1467 (size=12151) 2024-11-22T15:23:56,226 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:56,226 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-22T15:23:56,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:56,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
as already flushing 2024-11-22T15:23:56,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:56,227 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:56,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:56,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:56,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-22T15:23:56,297 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:56,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289096291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:56,309 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:56,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289096305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:56,380 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:56,380 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-22T15:23:56,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
2024-11-22T15:23:56,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. as already flushing 2024-11-22T15:23:56,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:56,381 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:56,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
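Family A of this table is MOB-enabled, which is why its flush earlier in this run first wrote a MOB file under mobdir/ (the HMobStore rename at 15:23:55,669 and the DefaultMobStoreFlusher entry at 15:23:56,114) before committing the regular A store file b9e9071e701e4e619b62813a31b74114. A column family is declared MOB roughly as in this sketch; the threshold is an arbitrary example, not the value this test uses:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilySketch {
  public static void main(String[] args) {
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
            .setMobEnabled(true)        // values above the threshold go to MOB files under mobdir/
            .setMobThreshold(102400L)   // example threshold in bytes (not the test's setting)
            .build())
        .build();
    System.out.println(desc);           // hand to Admin.createTable(desc) in a real setup
  }
}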
2024-11-22T15:23:56,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:56,533 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:56,533 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-22T15:23:56,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:56,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
as already flushing 2024-11-22T15:23:56,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:56,534 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:56,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:56,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:56,593 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/092ca142a40c49b68f29e233d75eed48 2024-11-22T15:23:56,600 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/b9e9071e701e4e619b62813a31b74114 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/b9e9071e701e4e619b62813a31b74114 2024-11-22T15:23:56,607 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/b9e9071e701e4e619b62813a31b74114, entries=200, sequenceid=255, filesize=38.8 K 2024-11-22T15:23:56,607 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/73b55fb0701b48f3a184b8d29ac4dbed as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/73b55fb0701b48f3a184b8d29ac4dbed 2024-11-22T15:23:56,612 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/73b55fb0701b48f3a184b8d29ac4dbed, entries=150, sequenceid=255, filesize=11.9 K 2024-11-22T15:23:56,615 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/092ca142a40c49b68f29e233d75eed48 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/092ca142a40c49b68f29e233d75eed48 2024-11-22T15:23:56,616 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:56,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1732289096612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:56,617 DEBUG [Thread-1771 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4156 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at 
org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7., hostname=77927f992d0b,36033,1732288915809, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T15:23:56,622 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/092ca142a40c49b68f29e233d75eed48, entries=150, sequenceid=255, filesize=11.9 K 2024-11-22T15:23:56,623 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 661abb5eb8be4eaf4f236a86a23909c7 in 1017ms, sequenceid=255, compaction requested=false 2024-11-22T15:23:56,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on 661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:56,623 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:23:56,624 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 661abb5eb8be4eaf4f236a86a23909c7 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-22T15:23:56,624 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=A 2024-11-22T15:23:56,624 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:56,624 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=B 2024-11-22T15:23:56,624 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:56,624 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=C 2024-11-22T15:23:56,624 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:56,662 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122e8af8674aa5a48e3ac735b0740231ef2_661abb5eb8be4eaf4f236a86a23909c7 is 50, key is test_row_0/A:col10/1732289035677/Put/seqid=0 2024-11-22T15:23:56,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742292_1468 (size=14994) 2024-11-22T15:23:56,675 DEBUG [MemStoreFlusher.0 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:56,679 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122e8af8674aa5a48e3ac735b0740231ef2_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122e8af8674aa5a48e3ac735b0740231ef2_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:56,681 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/895c588731434234b0afa6a14a277977, store: [table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:23:56,681 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/895c588731434234b0afa6a14a277977 is 175, key is test_row_0/A:col10/1732289035677/Put/seqid=0 2024-11-22T15:23:56,686 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:56,687 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-22T15:23:56,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:56,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. as already flushing 2024-11-22T15:23:56,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:56,696 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:56,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:56,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742293_1469 (size=39949) 2024-11-22T15:23:56,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:56,698 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=278, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/895c588731434234b0afa6a14a277977 2024-11-22T15:23:56,704 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:56,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289096703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:56,712 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:56,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289096706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:56,714 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/501741b7029b41b99ce3fccb9c080838 is 50, key is test_row_0/B:col10/1732289035677/Put/seqid=0 2024-11-22T15:23:56,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742294_1470 (size=12301) 2024-11-22T15:23:56,768 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/501741b7029b41b99ce3fccb9c080838 2024-11-22T15:23:56,784 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/9250615c5d224a5bbc7fc35adc1aeda2 is 50, key is test_row_0/C:col10/1732289035677/Put/seqid=0 2024-11-22T15:23:56,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742295_1471 (size=12301) 2024-11-22T15:23:56,799 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/9250615c5d224a5bbc7fc35adc1aeda2 2024-11-22T15:23:56,803 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/895c588731434234b0afa6a14a277977 as 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/895c588731434234b0afa6a14a277977 2024-11-22T15:23:56,807 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/895c588731434234b0afa6a14a277977, entries=200, sequenceid=278, filesize=39.0 K 2024-11-22T15:23:56,807 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/501741b7029b41b99ce3fccb9c080838 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/501741b7029b41b99ce3fccb9c080838 2024-11-22T15:23:56,813 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/501741b7029b41b99ce3fccb9c080838, entries=150, sequenceid=278, filesize=12.0 K 2024-11-22T15:23:56,813 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/9250615c5d224a5bbc7fc35adc1aeda2 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/9250615c5d224a5bbc7fc35adc1aeda2 2024-11-22T15:23:56,812 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:56,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289096806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:56,815 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:56,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289096808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:56,815 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:56,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289096812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:56,817 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/9250615c5d224a5bbc7fc35adc1aeda2, entries=150, sequenceid=278, filesize=12.0 K 2024-11-22T15:23:56,818 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 661abb5eb8be4eaf4f236a86a23909c7 in 193ms, sequenceid=278, compaction requested=true 2024-11-22T15:23:56,818 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:23:56,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 661abb5eb8be4eaf4f236a86a23909c7:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:23:56,818 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:23:56,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:56,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 661abb5eb8be4eaf4f236a86a23909c7:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:23:56,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:56,818 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:23:56,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 661abb5eb8be4eaf4f236a86a23909c7:C, priority=-2147483648, current under compaction 
store size is 3 2024-11-22T15:23:56,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:23:56,819 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 111349 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:23:56,819 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:23:56,819 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): 661abb5eb8be4eaf4f236a86a23909c7/B is initiating minor compaction (all files) 2024-11-22T15:23:56,819 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): 661abb5eb8be4eaf4f236a86a23909c7/A is initiating minor compaction (all files) 2024-11-22T15:23:56,819 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 661abb5eb8be4eaf4f236a86a23909c7/B in TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:56,819 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 661abb5eb8be4eaf4f236a86a23909c7/A in TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:56,819 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/25c8a13fddc54164b3481520d121af60, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/b9e9071e701e4e619b62813a31b74114, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/895c588731434234b0afa6a14a277977] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp, totalSize=108.7 K 2024-11-22T15:23:56,819 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/27744129772d44048dcda2f383347546, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/73b55fb0701b48f3a184b8d29ac4dbed, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/501741b7029b41b99ce3fccb9c080838] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp, totalSize=36.3 K 2024-11-22T15:23:56,819 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController 
[maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:56,819 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. files: [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/25c8a13fddc54164b3481520d121af60, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/b9e9071e701e4e619b62813a31b74114, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/895c588731434234b0afa6a14a277977] 2024-11-22T15:23:56,820 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 25c8a13fddc54164b3481520d121af60, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1732289034213 2024-11-22T15:23:56,820 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting b9e9071e701e4e619b62813a31b74114, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1732289034909 2024-11-22T15:23:56,820 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 895c588731434234b0afa6a14a277977, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1732289035647 2024-11-22T15:23:56,827 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 27744129772d44048dcda2f383347546, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1732289034213 2024-11-22T15:23:56,827 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 73b55fb0701b48f3a184b8d29ac4dbed, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1732289034960 2024-11-22T15:23:56,827 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 501741b7029b41b99ce3fccb9c080838, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1732289035647 2024-11-22T15:23:56,836 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:23:56,839 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 661abb5eb8be4eaf4f236a86a23909c7 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-22T15:23:56,839 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=A 2024-11-22T15:23:56,839 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:56,839 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=B 2024-11-22T15:23:56,839 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:56,839 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=C 2024-11-22T15:23:56,839 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:56,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on 661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:56,848 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:56,849 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-22T15:23:56,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:56,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. as already flushing 2024-11-22T15:23:56,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:56,849 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:56,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:56,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:56,853 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 661abb5eb8be4eaf4f236a86a23909c7#B#compaction#405 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:56,854 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/c900f769635a48f885c69c33fe147980 is 50, key is test_row_0/B:col10/1732289035677/Put/seqid=0 2024-11-22T15:23:56,863 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241122d6d869f5dea644dba54a1238f1f23535_661abb5eb8be4eaf4f236a86a23909c7 store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:23:56,865 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241122d6d869f5dea644dba54a1238f1f23535_661abb5eb8be4eaf4f236a86a23909c7, store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:23:56,866 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122d6d869f5dea644dba54a1238f1f23535_661abb5eb8be4eaf4f236a86a23909c7 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:23:56,892 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122a40f4a4c442848299de16872def09234_661abb5eb8be4eaf4f236a86a23909c7 is 50, key is test_row_0/A:col10/1732289036834/Put/seqid=0 2024-11-22T15:23:56,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742296_1472 (size=12949) 2024-11-22T15:23:56,907 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/c900f769635a48f885c69c33fe147980 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/c900f769635a48f885c69c33fe147980 2024-11-22T15:23:56,911 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 661abb5eb8be4eaf4f236a86a23909c7/B of 661abb5eb8be4eaf4f236a86a23909c7 into c900f769635a48f885c69c33fe147980(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
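[Editor's note] The DefaultMobStoreCompactor entries above show a MOB writer being created for family A and then aborted because the compaction produced no MOB cells, so no separate MOB file was kept. For orientation only, a column family can be declared MOB-enabled through the standard HBase 2.x descriptor builders; this is a hedged sketch, and the table name, family name, and 100 KB threshold below are placeholder values, not taken from this test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTable {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Values in family "A" larger than the threshold are stored as MOB files.
            ColumnFamilyDescriptor cfA = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("A"))
                .setMobEnabled(true)
                .setMobThreshold(100 * 1024L)   // placeholder threshold: 100 KB
                .build();
            admin.createTable(TableDescriptorBuilder
                .newBuilder(TableName.valueOf("ExampleMobTable"))   // placeholder table name
                .setColumnFamily(cfA)
                .build());
        }
    }
}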
2024-11-22T15:23:56,911 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:23:56,911 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7., storeName=661abb5eb8be4eaf4f236a86a23909c7/B, priority=13, startTime=1732289036818; duration=0sec 2024-11-22T15:23:56,911 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:23:56,911 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 661abb5eb8be4eaf4f236a86a23909c7:B 2024-11-22T15:23:56,911 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:23:56,913 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:23:56,913 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): 661abb5eb8be4eaf4f236a86a23909c7/C is initiating minor compaction (all files) 2024-11-22T15:23:56,913 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 661abb5eb8be4eaf4f236a86a23909c7/C in TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:56,913 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/ab0f1e0e5f404617bf592eec7135fd58, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/092ca142a40c49b68f29e233d75eed48, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/9250615c5d224a5bbc7fc35adc1aeda2] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp, totalSize=36.3 K 2024-11-22T15:23:56,916 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting ab0f1e0e5f404617bf592eec7135fd58, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1732289034213 2024-11-22T15:23:56,916 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 092ca142a40c49b68f29e233d75eed48, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1732289034960 2024-11-22T15:23:56,916 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 9250615c5d224a5bbc7fc35adc1aeda2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1732289035647 2024-11-22T15:23:56,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 
is added to blk_1073742297_1473 (size=4469) 2024-11-22T15:23:56,927 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 661abb5eb8be4eaf4f236a86a23909c7#A#compaction#404 average throughput is 0.27 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:56,927 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/96c45a8957b24d91918c60bff6bef3c1 is 175, key is test_row_0/A:col10/1732289035677/Put/seqid=0 2024-11-22T15:23:56,945 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 661abb5eb8be4eaf4f236a86a23909c7#C#compaction#407 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:56,946 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/cb9cd30f30094fc3b45985e5bd64f102 is 50, key is test_row_0/C:col10/1732289035677/Put/seqid=0 2024-11-22T15:23:56,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742298_1474 (size=12454) 2024-11-22T15:23:57,000 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:57,001 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-22T15:23:57,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:57,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. as already flushing 2024-11-22T15:23:57,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:57,001 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:57,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:57,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:57,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742299_1475 (size=31903) 2024-11-22T15:23:57,010 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:57,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289096999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:57,016 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/96c45a8957b24d91918c60bff6bef3c1 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/96c45a8957b24d91918c60bff6bef3c1 2024-11-22T15:23:57,022 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 661abb5eb8be4eaf4f236a86a23909c7/A of 661abb5eb8be4eaf4f236a86a23909c7 into 96c45a8957b24d91918c60bff6bef3c1(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:23:57,022 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:23:57,022 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7., storeName=661abb5eb8be4eaf4f236a86a23909c7/A, priority=13, startTime=1732289036818; duration=0sec 2024-11-22T15:23:57,022 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:57,022 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 661abb5eb8be4eaf4f236a86a23909c7:A 2024-11-22T15:23:57,029 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:57,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289097016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:57,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742300_1476 (size=12949) 2024-11-22T15:23:57,115 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:57,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289097112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:57,153 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:57,153 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-22T15:23:57,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:57,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. as already flushing 2024-11-22T15:23:57,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:57,153 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
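[Editor's note] The RegionTooBusyException warnings in this stretch mean writes were rejected because the region's memstore exceeded its blocking limit (512.0 K in this test run) while the flush and compactions above were still in flight. The stock HBase client retries such failures internally; the explicit retry loop below is only a minimal sketch against the public client API to make the behaviour concrete, with the row, value, attempt count, and backoff chosen here for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithRetry {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));   // illustrative row/value
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;                              // illustrative starting backoff
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);    // rejected with RegionTooBusyException while the
                    break;             // memstore is over its blocking limit
                } catch (RegionTooBusyException e) {
                    Thread.sleep(backoffMs);   // give the in-progress flush time to make room
                    backoffMs *= 2;
                }
            }
        }
    }
}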
2024-11-22T15:23:57,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:57,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:57,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-22T15:23:57,305 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:57,306 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-22T15:23:57,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:57,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. as already flushing 2024-11-22T15:23:57,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:57,306 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:57,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:57,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:57,319 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:57,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289097316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:57,334 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:57,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289097332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:57,372 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:57,380 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122a40f4a4c442848299de16872def09234_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122a40f4a4c442848299de16872def09234_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:57,381 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/09c361749e40448fb10b071d8a7f63e3, store: [table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:23:57,381 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/09c361749e40448fb10b071d8a7f63e3 is 175, key is test_row_0/A:col10/1732289036834/Put/seqid=0 2024-11-22T15:23:57,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742301_1477 (size=31255) 2024-11-22T15:23:57,437 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/cb9cd30f30094fc3b45985e5bd64f102 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/cb9cd30f30094fc3b45985e5bd64f102 2024-11-22T15:23:57,441 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 661abb5eb8be4eaf4f236a86a23909c7/C of 661abb5eb8be4eaf4f236a86a23909c7 into cb9cd30f30094fc3b45985e5bd64f102(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:23:57,441 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:23:57,441 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7., storeName=661abb5eb8be4eaf4f236a86a23909c7/C, priority=13, startTime=1732289036818; duration=0sec 2024-11-22T15:23:57,441 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:57,442 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 661abb5eb8be4eaf4f236a86a23909c7:C 2024-11-22T15:23:57,459 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:57,460 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-22T15:23:57,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:57,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. as already flushing 2024-11-22T15:23:57,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:57,460 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
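[Editor's note] The recurring pid=111 entries are one flush procedure being re-dispatched by the master: each attempt finds the region already flushing (the MemStoreFlusher activity above), fails with "Unable to complete flush", is reported back via reportProcedureDone, and is retried. A flush of this kind is normally requested through the Admin API rather than the internal procedure classes; the snippet below is a hedged sketch of that public entry point only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestFlush {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Asks the master to run a flush procedure for every region of the table.
            // If a region is already flushing, the server-side callable fails and the
            // procedure is retried, which is the repeated pid=111 pattern in this log.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}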
2024-11-22T15:23:57,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:57,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:57,612 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:57,612 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-22T15:23:57,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:57,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. as already flushing 2024-11-22T15:23:57,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:57,613 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:57,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:57,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:57,623 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:57,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289097622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:57,767 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:57,768 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-22T15:23:57,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:57,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. as already flushing 2024-11-22T15:23:57,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:57,769 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:23:57,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:57,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:57,790 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=295, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/09c361749e40448fb10b071d8a7f63e3 2024-11-22T15:23:57,808 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/58aa5055ca7d4091a05b7ba278cf7dc9 is 50, key is test_row_0/B:col10/1732289036834/Put/seqid=0 2024-11-22T15:23:57,824 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:57,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289097820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:57,824 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:57,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289097822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:57,841 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:57,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289097835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:57,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742302_1478 (size=12301) 2024-11-22T15:23:57,861 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/58aa5055ca7d4091a05b7ba278cf7dc9 2024-11-22T15:23:57,880 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/90403198d8a44db0b3c0f3b33bee39a8 is 50, key is test_row_0/C:col10/1732289036834/Put/seqid=0 2024-11-22T15:23:57,920 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:57,922 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-22T15:23:57,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:57,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. as already flushing 2024-11-22T15:23:57,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:57,922 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:57,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:57,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:57,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742303_1479 (size=12301) 2024-11-22T15:23:58,074 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:58,074 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-22T15:23:58,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:58,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. as already flushing 2024-11-22T15:23:58,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:58,075 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:58,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:58,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:58,128 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:58,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289098125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:58,226 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:58,227 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-22T15:23:58,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:58,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. as already flushing 2024-11-22T15:23:58,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:58,227 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:23:58,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:58,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:58,329 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/90403198d8a44db0b3c0f3b33bee39a8 2024-11-22T15:23:58,336 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/09c361749e40448fb10b071d8a7f63e3 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/09c361749e40448fb10b071d8a7f63e3 2024-11-22T15:23:58,342 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/09c361749e40448fb10b071d8a7f63e3, entries=150, sequenceid=295, filesize=30.5 K 2024-11-22T15:23:58,342 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/58aa5055ca7d4091a05b7ba278cf7dc9 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/58aa5055ca7d4091a05b7ba278cf7dc9 2024-11-22T15:23:58,346 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/58aa5055ca7d4091a05b7ba278cf7dc9, entries=150, sequenceid=295, filesize=12.0 K 2024-11-22T15:23:58,347 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/90403198d8a44db0b3c0f3b33bee39a8 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/90403198d8a44db0b3c0f3b33bee39a8 2024-11-22T15:23:58,354 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/90403198d8a44db0b3c0f3b33bee39a8, entries=150, sequenceid=295, filesize=12.0 K 2024-11-22T15:23:58,356 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 661abb5eb8be4eaf4f236a86a23909c7 in 1517ms, sequenceid=295, compaction requested=false 2024-11-22T15:23:58,356 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:23:58,379 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 
77927f992d0b,36033,1732288915809 2024-11-22T15:23:58,379 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-22T15:23:58,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:58,380 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2837): Flushing 661abb5eb8be4eaf4f236a86a23909c7 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-22T15:23:58,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=A 2024-11-22T15:23:58,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:58,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=B 2024-11-22T15:23:58,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:58,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=C 2024-11-22T15:23:58,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:58,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122941d026bfcb74d599b687fa8e35de6c1_661abb5eb8be4eaf4f236a86a23909c7 is 50, key is test_row_0/A:col10/1732289036986/Put/seqid=0 2024-11-22T15:23:58,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742304_1480 (size=12454) 2024-11-22T15:23:58,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,448 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122941d026bfcb74d599b687fa8e35de6c1_661abb5eb8be4eaf4f236a86a23909c7 to 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122941d026bfcb74d599b687fa8e35de6c1_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:58,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/aa003385f26c43d18a4aa71e70e81d0b, store: [table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:23:58,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/aa003385f26c43d18a4aa71e70e81d0b is 175, key is test_row_0/A:col10/1732289036986/Put/seqid=0 2024-11-22T15:23:58,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742305_1481 (size=31255) 2024-11-22T15:23:58,485 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=317, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/aa003385f26c43d18a4aa71e70e81d0b 2024-11-22T15:23:58,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/95df3d3c3e1f4520bf3f489f0383f04b is 50, key is test_row_0/B:col10/1732289036986/Put/seqid=0 2024-11-22T15:23:58,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742306_1482 (size=12301) 2024-11-22T15:23:58,520 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/95df3d3c3e1f4520bf3f489f0383f04b 2024-11-22T15:23:58,545 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/019d2d1fab3c450d98a3c1907be8eee9, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/0ceaf3db02a0403096af4aa7c73f934d, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/2c98c0f69bd94adf8a325dcad1ddc4c0, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/5a2c7f914ace43ac9b87db9df174e69b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/a5d3e0a80e8d48e196561411045c79db, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/6bdf12d847ff40ec9dada906b9dd7bd4, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/d8fb5a451b66474bbb9ae5bd67836d1c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/7c8059c96f4e4976a581c57483153a08, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/5496e3153caf4475b8095302ee0e529b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/abe312df5333471eadf4c807ea91335b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/924b1f1eb5384dab8ecbead9181f09c3, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/26fe80c81218483ab556c76e43b8dfdf, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/d08360cc57da4b69a5c6abdf97aa01b5, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/dcc5db6081db418d86fe46f41d0d77d4, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/d8db175f7ce74feda7a74e089b3880fc, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/ab0553a98012439c92dd7e36cbf593f9, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/25c8a13fddc54164b3481520d121af60, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/b9e9071e701e4e619b62813a31b74114, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/895c588731434234b0afa6a14a277977] to archive 2024-11-22T15:23:58,547 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-22T15:23:58,548 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/019d2d1fab3c450d98a3c1907be8eee9 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/019d2d1fab3c450d98a3c1907be8eee9 2024-11-22T15:23:58,549 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/0ceaf3db02a0403096af4aa7c73f934d to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/0ceaf3db02a0403096af4aa7c73f934d 2024-11-22T15:23:58,550 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/2c98c0f69bd94adf8a325dcad1ddc4c0 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/2c98c0f69bd94adf8a325dcad1ddc4c0 2024-11-22T15:23:58,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/02126ed47d304dac8e564d19f7518bf2 is 50, key is test_row_0/C:col10/1732289036986/Put/seqid=0 2024-11-22T15:23:58,557 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/5a2c7f914ace43ac9b87db9df174e69b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/5a2c7f914ace43ac9b87db9df174e69b 2024-11-22T15:23:58,559 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/a5d3e0a80e8d48e196561411045c79db to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/a5d3e0a80e8d48e196561411045c79db 2024-11-22T15:23:58,561 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/6bdf12d847ff40ec9dada906b9dd7bd4 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/6bdf12d847ff40ec9dada906b9dd7bd4 2024-11-22T15:23:58,562 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/d8fb5a451b66474bbb9ae5bd67836d1c to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/d8fb5a451b66474bbb9ae5bd67836d1c 2024-11-22T15:23:58,566 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/7c8059c96f4e4976a581c57483153a08 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/7c8059c96f4e4976a581c57483153a08 2024-11-22T15:23:58,566 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/5496e3153caf4475b8095302ee0e529b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/5496e3153caf4475b8095302ee0e529b 2024-11-22T15:23:58,567 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/abe312df5333471eadf4c807ea91335b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/abe312df5333471eadf4c807ea91335b 2024-11-22T15:23:58,570 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/924b1f1eb5384dab8ecbead9181f09c3 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/924b1f1eb5384dab8ecbead9181f09c3 2024-11-22T15:23:58,572 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/26fe80c81218483ab556c76e43b8dfdf to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/26fe80c81218483ab556c76e43b8dfdf 2024-11-22T15:23:58,573 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/d08360cc57da4b69a5c6abdf97aa01b5 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/d08360cc57da4b69a5c6abdf97aa01b5 2024-11-22T15:23:58,576 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/dcc5db6081db418d86fe46f41d0d77d4 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/dcc5db6081db418d86fe46f41d0d77d4 2024-11-22T15:23:58,585 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/d8db175f7ce74feda7a74e089b3880fc to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/d8db175f7ce74feda7a74e089b3880fc 2024-11-22T15:23:58,586 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/ab0553a98012439c92dd7e36cbf593f9 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/ab0553a98012439c92dd7e36cbf593f9 2024-11-22T15:23:58,588 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/25c8a13fddc54164b3481520d121af60 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/25c8a13fddc54164b3481520d121af60 2024-11-22T15:23:58,589 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/b9e9071e701e4e619b62813a31b74114 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/b9e9071e701e4e619b62813a31b74114 2024-11-22T15:23:58,591 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/895c588731434234b0afa6a14a277977 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/895c588731434234b0afa6a14a277977 2024-11-22T15:23:58,594 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/0af71acc0fae436facb5fcb58ec84664, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/2c8ac830bc2749cea9acb129f79e3137, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/8cae1fea5a764ee29526a007f89095f5, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/934eb4e6fb884f38a9f9b2af29788691, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/55d4bc38e9d3440fb29ab3e4e408169f, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/288c6883dcf7467a92542c825cedee60, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/55cd205341014586acd7019f2d0dc1a3, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/0a4fbeecac3845c494896afe2c13c7b1, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/1ff1e412c21b44dbbbc03e58b94014e9, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/7bfcc46591784f35812bcc7e7c144689, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/16da4df195504d2082e0b4814767378e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/40fd8f6eb9104e838e613358d47d217d, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/1d11d9c819f348c3b365aa8045c3f82c, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/09ae589b22a6476a86045f4ecff27a99, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/a0de90f391654baf86d2b5d52b46a6c7, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/27744129772d44048dcda2f383347546, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/3a8a108a6e814a649535b3f112efeebd, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/73b55fb0701b48f3a184b8d29ac4dbed, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/501741b7029b41b99ce3fccb9c080838] to archive 2024-11-22T15:23:58,598 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-22T15:23:58,605 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/0af71acc0fae436facb5fcb58ec84664 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/0af71acc0fae436facb5fcb58ec84664 2024-11-22T15:23:58,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742307_1483 (size=12301) 2024-11-22T15:23:58,606 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/02126ed47d304dac8e564d19f7518bf2 2024-11-22T15:23:58,609 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/2c8ac830bc2749cea9acb129f79e3137 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/2c8ac830bc2749cea9acb129f79e3137 2024-11-22T15:23:58,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/aa003385f26c43d18a4aa71e70e81d0b as 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/aa003385f26c43d18a4aa71e70e81d0b 2024-11-22T15:23:58,611 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/8cae1fea5a764ee29526a007f89095f5 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/8cae1fea5a764ee29526a007f89095f5 2024-11-22T15:23:58,614 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/934eb4e6fb884f38a9f9b2af29788691 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/934eb4e6fb884f38a9f9b2af29788691 2024-11-22T15:23:58,615 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/55d4bc38e9d3440fb29ab3e4e408169f to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/55d4bc38e9d3440fb29ab3e4e408169f 2024-11-22T15:23:58,616 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/288c6883dcf7467a92542c825cedee60 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/288c6883dcf7467a92542c825cedee60 2024-11-22T15:23:58,617 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/55cd205341014586acd7019f2d0dc1a3 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/55cd205341014586acd7019f2d0dc1a3 2024-11-22T15:23:58,617 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/aa003385f26c43d18a4aa71e70e81d0b, entries=150, sequenceid=317, filesize=30.5 K 2024-11-22T15:23:58,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, 
pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/95df3d3c3e1f4520bf3f489f0383f04b as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/95df3d3c3e1f4520bf3f489f0383f04b 2024-11-22T15:23:58,618 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/0a4fbeecac3845c494896afe2c13c7b1 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/0a4fbeecac3845c494896afe2c13c7b1 2024-11-22T15:23:58,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,621 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/1ff1e412c21b44dbbbc03e58b94014e9 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/1ff1e412c21b44dbbbc03e58b94014e9 2024-11-22T15:23:58,622 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/7bfcc46591784f35812bcc7e7c144689 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/7bfcc46591784f35812bcc7e7c144689 2024-11-22T15:23:58,623 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/95df3d3c3e1f4520bf3f489f0383f04b, entries=150, sequenceid=317, filesize=12.0 K 2024-11-22T15:23:58,623 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/16da4df195504d2082e0b4814767378e to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/16da4df195504d2082e0b4814767378e 2024-11-22T15:23:58,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/02126ed47d304dac8e564d19f7518bf2 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/02126ed47d304dac8e564d19f7518bf2 2024-11-22T15:23:58,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,625 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/40fd8f6eb9104e838e613358d47d217d to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/40fd8f6eb9104e838e613358d47d217d 2024-11-22T15:23:58,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,627 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/02126ed47d304dac8e564d19f7518bf2, entries=150, sequenceid=317, filesize=12.0 K 2024-11-22T15:23:58,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,628 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=0 B/0 for 661abb5eb8be4eaf4f236a86a23909c7 in 248ms, sequenceid=317, compaction requested=true 2024-11-22T15:23:58,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2538): Flush status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:23:58,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:58,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=111 2024-11-22T15:23:58,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=111 2024-11-22T15:23:58,629 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/1d11d9c819f348c3b365aa8045c3f82c to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/1d11d9c819f348c3b365aa8045c3f82c 2024-11-22T15:23:58,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,630 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,631 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110 2024-11-22T15:23:58,631 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.4800 sec 2024-11-22T15:23:58,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,632 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees in 3.4840 sec 2024-11-22T15:23:58,633 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/09ae589b22a6476a86045f4ecff27a99 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/09ae589b22a6476a86045f4ecff27a99 2024-11-22T15:23:58,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker 
impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,634 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/a0de90f391654baf86d2b5d52b46a6c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/a0de90f391654baf86d2b5d52b46a6c7 2024-11-22T15:23:58,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,635 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/27744129772d44048dcda2f383347546 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/27744129772d44048dcda2f383347546 2024-11-22T15:23:58,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,636 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,638 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/3a8a108a6e814a649535b3f112efeebd to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/3a8a108a6e814a649535b3f112efeebd 2024-11-22T15:23:58,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,639 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/73b55fb0701b48f3a184b8d29ac4dbed to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/73b55fb0701b48f3a184b8d29ac4dbed 2024-11-22T15:23:58,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,644 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/501741b7029b41b99ce3fccb9c080838 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/501741b7029b41b99ce3fccb9c080838 2024-11-22T15:23:58,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,646 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,647 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/f1105958470f4c1c88b6a2f735816768, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/c0dc48514bc14bafa2ad90518fc61104, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/516c9502ef43401f8a75589b194f174c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/0edb27edb6144d858f6ab436fdf2035e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/6fff161c3f0f453d9e3f482c9095876a, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/9b6a78dcab104426ab5604c5ef40cace, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/da76b0310eb94db4892be2f6d9dfacd9, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/fb57a54887f24e3eb7544bbab614585d, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/5fd7dca0956944c9af204e0b19b0bcbc, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/a42056b8b6af4254a0409da845577704, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/ca93c43c73a74c9d96573f0bdf49ddcd, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/63afbe70dcb748e8abbe3c95e91f8908, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/42e658ac4a8b40bbac4c4ae574d18aea, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/1c861e94d9654997875a5dc1759db1ed, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/5f1e85641d624ee0b2d5b2c8afbd401f, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/ab0f1e0e5f404617bf592eec7135fd58, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/0a3ea6d1dca642f6b13a58d9e3ddc440, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/092ca142a40c49b68f29e233d75eed48, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/9250615c5d224a5bbc7fc35adc1aeda2] to archive 2024-11-22T15:23:58,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,648 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-22T15:23:58,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,651 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/f1105958470f4c1c88b6a2f735816768 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/f1105958470f4c1c88b6a2f735816768 2024-11-22T15:23:58,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,654 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/c0dc48514bc14bafa2ad90518fc61104 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/c0dc48514bc14bafa2ad90518fc61104 2024-11-22T15:23:58,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,655 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/516c9502ef43401f8a75589b194f174c to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/516c9502ef43401f8a75589b194f174c 2024-11-22T15:23:58,655 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,656 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/0edb27edb6144d858f6ab436fdf2035e to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/0edb27edb6144d858f6ab436fdf2035e 2024-11-22T15:23:58,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,657 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/6fff161c3f0f453d9e3f482c9095876a to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/6fff161c3f0f453d9e3f482c9095876a 2024-11-22T15:23:58,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,660 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/9b6a78dcab104426ab5604c5ef40cace to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/9b6a78dcab104426ab5604c5ef40cace 2024-11-22T15:23:58,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,664 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/da76b0310eb94db4892be2f6d9dfacd9 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/da76b0310eb94db4892be2f6d9dfacd9 2024-11-22T15:23:58,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,665 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,668 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/fb57a54887f24e3eb7544bbab614585d to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/fb57a54887f24e3eb7544bbab614585d 2024-11-22T15:23:58,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,670 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/5fd7dca0956944c9af204e0b19b0bcbc to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/5fd7dca0956944c9af204e0b19b0bcbc 2024-11-22T15:23:58,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,675 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/a42056b8b6af4254a0409da845577704 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/a42056b8b6af4254a0409da845577704 2024-11-22T15:23:58,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,676 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 
{event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/ca93c43c73a74c9d96573f0bdf49ddcd to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/ca93c43c73a74c9d96573f0bdf49ddcd 2024-11-22T15:23:58,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,678 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/63afbe70dcb748e8abbe3c95e91f8908 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/63afbe70dcb748e8abbe3c95e91f8908 2024-11-22T15:23:58,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,682 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/42e658ac4a8b40bbac4c4ae574d18aea to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/42e658ac4a8b40bbac4c4ae574d18aea 2024-11-22T15:23:58,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,684 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/1c861e94d9654997875a5dc1759db1ed to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/1c861e94d9654997875a5dc1759db1ed 2024-11-22T15:23:58,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,686 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/5f1e85641d624ee0b2d5b2c8afbd401f to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/5f1e85641d624ee0b2d5b2c8afbd401f 2024-11-22T15:23:58,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,696 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/ab0f1e0e5f404617bf592eec7135fd58 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/ab0f1e0e5f404617bf592eec7135fd58 2024-11-22T15:23:58,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,700 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,701 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/0a3ea6d1dca642f6b13a58d9e3ddc440 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/0a3ea6d1dca642f6b13a58d9e3ddc440 2024-11-22T15:23:58,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,703 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/092ca142a40c49b68f29e233d75eed48 to 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/092ca142a40c49b68f29e233d75eed48 2024-11-22T15:23:58,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,704 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/77927f992d0b:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/9250615c5d224a5bbc7fc35adc1aeda2 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/9250615c5d224a5bbc7fc35adc1aeda2 2024-11-22T15:23:58,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
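[editor's note] The RS_COMPACTED_FILES_DISCHARGER entries above show compacted store files being moved from the region's data directory to a mirrored path under the archive directory. A minimal sketch of that path mapping, based only on the paths visible in this log (it is not HBase's actual HFileArchiver implementation):

/**
 * Illustrative only: derive the archive location shown in the
 * HFileArchiver entries above by mirroring the store file's relative
 * path under <rootDir>/archive. Assumes the root-dir layout seen in
 * this log; not the real HBase archiving code.
 */
public class ArchivePathSketch {
  static String toArchivePath(String rootDir, String storeFile) {
    // relative part: data/default/<table>/<region>/<family>/<hfile>
    String relative = storeFile.substring(rootDir.length() + 1);
    return rootDir + "/archive/" + relative;
  }

  public static void main(String[] args) {
    String root = "hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690";
    String store = root + "/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/9250615c5d224a5bbc7fc35adc1aeda2";
    // prints the archive location seen in the corresponding HFileArchiver entry above
    System.out.println(toArchivePath(root, store));
  }
}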
2024-11-22T15:23:58,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:58,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:58,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:58,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:58,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
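[editor's note] The StoreFileTrackerFactory(122) DEBUG entries dominate this stretch of the log. If that chatter is not wanted in a run like this, the logger for the storefiletracker package can be raised above DEBUG; a minimal sketch, assuming Log4j2 core is on the classpath (as the configuration dump at the start of this log indicates). The same effect can be achieved with a logger entry in the properties configuration.

import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;

/**
 * Sketch: silence the repeated "instantiating StoreFileTracker impl ..."
 * DEBUG entries by raising that package's logger to INFO at runtime.
 * Assumes Log4j2 core; would typically be called once during test setup.
 */
public class QuietStoreFileTrackerLogs {
  public static void main(String[] args) {
    Configurator.setLevel("org.apache.hadoop.hbase.regionserver.storefiletracker", Level.INFO);
  }
}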
2024-11-22T15:23:58,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:58,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:58,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:58,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:58,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:58,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:58,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:58,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:58,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:58,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:58,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:58,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:58,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:58,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:58,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:58,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:58,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:58,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:58,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:58,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:58,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:58,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:58,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:58,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:58,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:58,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:58,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:58,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:58,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:58,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:58,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:58,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:58,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:58,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:58,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:58,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... same DEBUG message repeated continuously by RpcServer.default.FPBQ.Fifo handlers 0-2 (queue=0, port=36033) from 15:23:58,879 through 15:23:58,972 ...]
2024-11-22T15:23:58,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-22T15:23:58,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:58,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:58,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:58,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:58,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:59,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:59,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:59,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:59,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:59,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:59,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:59,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:59,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:59,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:59,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:59,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:59,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:59,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:59,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:59,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:59,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:59,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:59,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:59,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:59,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:59,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:59,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:59,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:59,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:59,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:59,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:59,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-22T15:23:59,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-22T15:23:59,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-22T15:23:59,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-22T15:23:59,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:59,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:59,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:59,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on 661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:59,250 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 661abb5eb8be4eaf4f236a86a23909c7 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-22T15:23:59,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=A 2024-11-22T15:23:59,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:59,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=B 2024-11-22T15:23:59,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:59,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=C 2024-11-22T15:23:59,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:59,254 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-22T15:23:59,254 INFO [Thread-1775 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 110 completed 2024-11-22T15:23:59,264 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:23:59,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees 2024-11-22T15:23:59,266 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:23:59,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-22T15:23:59,266 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:23:59,266 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:23:59,278 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122f46ba550a520438c9d13b2a2f478ee1c_661abb5eb8be4eaf4f236a86a23909c7 is 50, key is test_row_0/A:col10/1732289039248/Put/seqid=0 2024-11-22T15:23:59,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742308_1484 (size=17534) 2024-11-22T15:23:59,309 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,315 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122f46ba550a520438c9d13b2a2f478ee1c_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122f46ba550a520438c9d13b2a2f478ee1c_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:59,316 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/4faeb4f24b9c462ba2c028f3dd7af12a, store: [table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:23:59,316 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/4faeb4f24b9c462ba2c028f3dd7af12a is 175, key is test_row_0/A:col10/1732289039248/Put/seqid=0 2024-11-22T15:23:59,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742309_1485 (size=48639) 2024-11-22T15:23:59,355 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=328, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/4faeb4f24b9c462ba2c028f3dd7af12a 2024-11-22T15:23:59,364 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/313a0ca60f8e4195872a4032091b4a56 is 50, key is test_row_0/B:col10/1732289039248/Put/seqid=0 2024-11-22T15:23:59,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-22T15:23:59,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742310_1486 (size=12301) 2024-11-22T15:23:59,420 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/313a0ca60f8e4195872a4032091b4a56 2024-11-22T15:23:59,421 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:59,421 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-22T15:23:59,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:59,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. as already flushing 2024-11-22T15:23:59,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:59,422 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:59,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:59,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:23:59,423 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:59,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289099409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:59,424 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:59,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289099411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:59,448 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/ae99c736547f4ff2a03bdc6ab6314c5f is 50, key is test_row_0/C:col10/1732289039248/Put/seqid=0 2024-11-22T15:23:59,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742311_1487 (size=12301) 2024-11-22T15:23:59,497 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/ae99c736547f4ff2a03bdc6ab6314c5f 2024-11-22T15:23:59,509 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/4faeb4f24b9c462ba2c028f3dd7af12a as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/4faeb4f24b9c462ba2c028f3dd7af12a 2024-11-22T15:23:59,515 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/4faeb4f24b9c462ba2c028f3dd7af12a, entries=250, sequenceid=328, filesize=47.5 K 2024-11-22T15:23:59,516 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/313a0ca60f8e4195872a4032091b4a56 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/313a0ca60f8e4195872a4032091b4a56 2024-11-22T15:23:59,519 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/313a0ca60f8e4195872a4032091b4a56, entries=150, sequenceid=328, filesize=12.0 K 2024-11-22T15:23:59,528 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/ae99c736547f4ff2a03bdc6ab6314c5f as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/ae99c736547f4ff2a03bdc6ab6314c5f 2024-11-22T15:23:59,530 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:59,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289099524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:59,531 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:59,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289099525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:59,533 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/ae99c736547f4ff2a03bdc6ab6314c5f, entries=150, sequenceid=328, filesize=12.0 K 2024-11-22T15:23:59,534 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 661abb5eb8be4eaf4f236a86a23909c7 in 284ms, sequenceid=328, compaction requested=true 2024-11-22T15:23:59,534 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:23:59,534 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 661abb5eb8be4eaf4f236a86a23909c7:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:23:59,534 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:23:59,534 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T15:23:59,534 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 661abb5eb8be4eaf4f236a86a23909c7:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:23:59,534 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:23:59,534 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 661abb5eb8be4eaf4f236a86a23909c7:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:23:59,534 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-22T15:23:59,534 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T15:23:59,536 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 
files of size 49852 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T15:23:59,536 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): 661abb5eb8be4eaf4f236a86a23909c7/B is initiating minor compaction (all files) 2024-11-22T15:23:59,536 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 661abb5eb8be4eaf4f236a86a23909c7/B in TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:59,536 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/c900f769635a48f885c69c33fe147980, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/58aa5055ca7d4091a05b7ba278cf7dc9, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/95df3d3c3e1f4520bf3f489f0383f04b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/313a0ca60f8e4195872a4032091b4a56] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp, totalSize=48.7 K 2024-11-22T15:23:59,536 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 143052 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T15:23:59,536 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): 661abb5eb8be4eaf4f236a86a23909c7/A is initiating minor compaction (all files) 2024-11-22T15:23:59,536 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 661abb5eb8be4eaf4f236a86a23909c7/A in TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
2024-11-22T15:23:59,536 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/96c45a8957b24d91918c60bff6bef3c1, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/09c361749e40448fb10b071d8a7f63e3, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/aa003385f26c43d18a4aa71e70e81d0b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/4faeb4f24b9c462ba2c028f3dd7af12a] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp, totalSize=139.7 K 2024-11-22T15:23:59,536 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:23:59,536 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. files: [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/96c45a8957b24d91918c60bff6bef3c1, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/09c361749e40448fb10b071d8a7f63e3, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/aa003385f26c43d18a4aa71e70e81d0b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/4faeb4f24b9c462ba2c028f3dd7af12a] 2024-11-22T15:23:59,537 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting c900f769635a48f885c69c33fe147980, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1732289035647 2024-11-22T15:23:59,537 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 96c45a8957b24d91918c60bff6bef3c1, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1732289035647 2024-11-22T15:23:59,537 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 58aa5055ca7d4091a05b7ba278cf7dc9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732289036702 2024-11-22T15:23:59,537 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 09c361749e40448fb10b071d8a7f63e3, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732289036702 2024-11-22T15:23:59,537 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 95df3d3c3e1f4520bf3f489f0383f04b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1732289036968 2024-11-22T15:23:59,538 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting aa003385f26c43d18a4aa71e70e81d0b, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1732289036968 2024-11-22T15:23:59,538 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 313a0ca60f8e4195872a4032091b4a56, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1732289039042 2024-11-22T15:23:59,538 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4faeb4f24b9c462ba2c028f3dd7af12a, keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1732289039042 2024-11-22T15:23:59,546 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 661abb5eb8be4eaf4f236a86a23909c7#B#compaction#416 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:23:59,547 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/a23be3e99d5f45178bfdef1868f340fb is 50, key is test_row_0/B:col10/1732289039248/Put/seqid=0 2024-11-22T15:23:59,563 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:23:59,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-22T15:23:59,573 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:23:59,574 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-22T15:23:59,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
2024-11-22T15:23:59,574 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2837): Flushing 661abb5eb8be4eaf4f236a86a23909c7 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-22T15:23:59,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=A 2024-11-22T15:23:59,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:59,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=B 2024-11-22T15:23:59,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:59,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=C 2024-11-22T15:23:59,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:59,583 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411223cad3395b78744cd9783cd7945fe1eff_661abb5eb8be4eaf4f236a86a23909c7 store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:23:59,585 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411223cad3395b78744cd9783cd7945fe1eff_661abb5eb8be4eaf4f236a86a23909c7, store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:23:59,586 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411223cad3395b78744cd9783cd7945fe1eff_661abb5eb8be4eaf4f236a86a23909c7 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:23:59,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742312_1488 (size=12439) 2024-11-22T15:23:59,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411221917181e87db453d81dfd4cabb217d33_661abb5eb8be4eaf4f236a86a23909c7 is 50, key is test_row_0/A:col10/1732289039409/Put/seqid=0 2024-11-22T15:23:59,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742313_1489 (size=4469) 2024-11-22T15:23:59,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:42059 is added to blk_1073742314_1490 (size=12454) 2024-11-22T15:23:59,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,688 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411221917181e87db453d81dfd4cabb217d33_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411221917181e87db453d81dfd4cabb217d33_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:59,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/a23f49d469964f188979661d80f613a1, store: [table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:23:59,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/a23f49d469964f188979661d80f613a1 is 175, key is test_row_0/A:col10/1732289039409/Put/seqid=0 2024-11-22T15:23:59,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742315_1491 (size=31255) 2024-11-22T15:23:59,711 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=353, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/a23f49d469964f188979661d80f613a1 2024-11-22T15:23:59,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/61dbbab0d0c44878851f9d5cc2ad619e is 50, key is test_row_0/B:col10/1732289039409/Put/seqid=0 2024-11-22T15:23:59,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on 661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:59,741 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
as already flushing 2024-11-22T15:23:59,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742316_1492 (size=12301) 2024-11-22T15:23:59,747 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=353 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/61dbbab0d0c44878851f9d5cc2ad619e 2024-11-22T15:23:59,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/8e5845ca371a43478dc0ac2db15a7644 is 50, key is test_row_0/C:col10/1732289039409/Put/seqid=0 2024-11-22T15:23:59,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742317_1493 (size=12301) 2024-11-22T15:23:59,768 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=353 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/8e5845ca371a43478dc0ac2db15a7644 2024-11-22T15:23:59,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/a23f49d469964f188979661d80f613a1 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/a23f49d469964f188979661d80f613a1 2024-11-22T15:23:59,774 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/a23f49d469964f188979661d80f613a1, entries=150, sequenceid=353, filesize=30.5 K 2024-11-22T15:23:59,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/61dbbab0d0c44878851f9d5cc2ad619e as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/61dbbab0d0c44878851f9d5cc2ad619e 2024-11-22T15:23:59,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker 
impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,781 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/61dbbab0d0c44878851f9d5cc2ad619e, entries=150, sequenceid=353, filesize=12.0 K 2024-11-22T15:23:59,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/8e5845ca371a43478dc0ac2db15a7644 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/8e5845ca371a43478dc0ac2db15a7644 2024-11-22T15:23:59,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:59,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:23:59,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,789 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/8e5845ca371a43478dc0ac2db15a7644, entries=150, sequenceid=353, filesize=12.0 K 2024-11-22T15:23:59,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,790 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 661abb5eb8be4eaf4f236a86a23909c7 in 216ms, sequenceid=353, compaction requested=true 2024-11-22T15:23:59,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2538): Flush status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:23:59,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
2024-11-22T15:23:59,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=113 2024-11-22T15:23:59,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=113 2024-11-22T15:23:59,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,793 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-11-22T15:23:59,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,793 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 524 msec 2024-11-22T15:23:59,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,794 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,794 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees in 528 msec 2024-11-22T15:23:59,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,797 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,801 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on 661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:59,804 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 661abb5eb8be4eaf4f236a86a23909c7 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-22T15:23:59,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=A 2024-11-22T15:23:59,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:59,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=B 2024-11-22T15:23:59,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:59,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=C 2024-11-22T15:23:59,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:23:59,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,805 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,814 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122448eb70eeb8a4629899d9b95a16d5309_661abb5eb8be4eaf4f236a86a23909c7 is 50, key is test_row_0/A:col10/1732289039776/Put/seqid=0 2024-11-22T15:23:59,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742318_1494 (size=17534) 2024-11-22T15:23:59,841 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:23:59,843 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122448eb70eeb8a4629899d9b95a16d5309_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122448eb70eeb8a4629899d9b95a16d5309_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:23:59,844 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/3cfaa64de02842d9bf32160faabf71b4, store: [table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:23:59,844 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the 
biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/3cfaa64de02842d9bf32160faabf71b4 is 175, key is test_row_0/A:col10/1732289039776/Put/seqid=0 2024-11-22T15:23:59,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742319_1495 (size=48639) 2024-11-22T15:23:59,863 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=365, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/3cfaa64de02842d9bf32160faabf71b4 2024-11-22T15:23:59,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-22T15:23:59,868 INFO [Thread-1775 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 112 completed 2024-11-22T15:23:59,870 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:23:59,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees 2024-11-22T15:23:59,871 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:23:59,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-22T15:23:59,872 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:23:59,872 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:23:59,879 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/3f2ef7cf028a435b962a643c20f23124 is 50, key is test_row_0/B:col10/1732289039776/Put/seqid=0 2024-11-22T15:23:59,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742320_1496 (size=12301) 2024-11-22T15:23:59,899 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=365 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/3f2ef7cf028a435b962a643c20f23124 2024-11-22T15:23:59,901 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore 
size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:59,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289099896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:59,904 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/8ff986aef920428baf083e68b1fc62cd is 50, key is test_row_0/C:col10/1732289039776/Put/seqid=0 2024-11-22T15:23:59,912 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:59,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289099897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:59,913 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:59,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289099898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:59,914 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:23:59,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289099897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:23:59,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742321_1497 (size=12301) 2024-11-22T15:23:59,940 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=365 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/8ff986aef920428baf083e68b1fc62cd 2024-11-22T15:23:59,944 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/3cfaa64de02842d9bf32160faabf71b4 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/3cfaa64de02842d9bf32160faabf71b4 2024-11-22T15:23:59,947 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/3cfaa64de02842d9bf32160faabf71b4, entries=250, sequenceid=365, filesize=47.5 K 2024-11-22T15:23:59,948 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/3f2ef7cf028a435b962a643c20f23124 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/3f2ef7cf028a435b962a643c20f23124 2024-11-22T15:23:59,951 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/3f2ef7cf028a435b962a643c20f23124, entries=150, sequenceid=365, filesize=12.0 K 2024-11-22T15:23:59,952 DEBUG 
[MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/8ff986aef920428baf083e68b1fc62cd as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/8ff986aef920428baf083e68b1fc62cd 2024-11-22T15:23:59,955 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/8ff986aef920428baf083e68b1fc62cd, entries=150, sequenceid=365, filesize=12.0 K 2024-11-22T15:23:59,955 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 661abb5eb8be4eaf4f236a86a23909c7 in 151ms, sequenceid=365, compaction requested=true 2024-11-22T15:23:59,956 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:23:59,956 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 661abb5eb8be4eaf4f236a86a23909c7:A, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:23:59,956 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-22T15:23:59,956 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 661abb5eb8be4eaf4f236a86a23909c7:B, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:23:59,956 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-22T15:23:59,956 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 661abb5eb8be4eaf4f236a86a23909c7:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:23:59,956 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-11-22T15:23:59,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-22T15:24:00,008 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/a23be3e99d5f45178bfdef1868f340fb as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/a23be3e99d5f45178bfdef1868f340fb 2024-11-22T15:24:00,013 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 661abb5eb8be4eaf4f236a86a23909c7/B of 661abb5eb8be4eaf4f236a86a23909c7 into a23be3e99d5f45178bfdef1868f340fb(size=12.1 K), total size for store is 36.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:24:00,013 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:24:00,013 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7., storeName=661abb5eb8be4eaf4f236a86a23909c7/B, priority=12, startTime=1732289039534; duration=0sec 2024-11-22T15:24:00,013 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-11-22T15:24:00,013 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 661abb5eb8be4eaf4f236a86a23909c7:B 2024-11-22T15:24:00,013 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 4 compacting, 2 eligible, 16 blocking 2024-11-22T15:24:00,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on 661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:00,014 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 661abb5eb8be4eaf4f236a86a23909c7 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-22T15:24:00,014 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-22T15:24:00,014 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=A 2024-11-22T15:24:00,014 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-22T15:24:00,014 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:00,014 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
because compaction request was cancelled 2024-11-22T15:24:00,014 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=B 2024-11-22T15:24:00,014 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 661abb5eb8be4eaf4f236a86a23909c7:A 2024-11-22T15:24:00,014 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:00,015 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=C 2024-11-22T15:24:00,015 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking 2024-11-22T15:24:00,015 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:00,018 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 74454 starting at candidate #0 after considering 10 permutations with 10 in ratio 2024-11-22T15:24:00,018 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): 661abb5eb8be4eaf4f236a86a23909c7/C is initiating minor compaction (all files) 2024-11-22T15:24:00,018 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 661abb5eb8be4eaf4f236a86a23909c7/C in TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:24:00,018 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/cb9cd30f30094fc3b45985e5bd64f102, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/90403198d8a44db0b3c0f3b33bee39a8, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/02126ed47d304dac8e564d19f7518bf2, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/ae99c736547f4ff2a03bdc6ab6314c5f, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/8e5845ca371a43478dc0ac2db15a7644, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/8ff986aef920428baf083e68b1fc62cd] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp, totalSize=72.7 K 2024-11-22T15:24:00,018 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting cb9cd30f30094fc3b45985e5bd64f102, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1732289035647 2024-11-22T15:24:00,019 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 90403198d8a44db0b3c0f3b33bee39a8, keycount=150, 
bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732289036702 2024-11-22T15:24:00,019 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 02126ed47d304dac8e564d19f7518bf2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1732289036968 2024-11-22T15:24:00,019 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting ae99c736547f4ff2a03bdc6ab6314c5f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1732289039042 2024-11-22T15:24:00,020 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 8e5845ca371a43478dc0ac2db15a7644, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1732289039391 2024-11-22T15:24:00,020 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 8ff986aef920428baf083e68b1fc62cd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=365, earliestPutTs=1732289039762 2024-11-22T15:24:00,023 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:00,024 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-22T15:24:00,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:24:00,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. as already flushing 2024-11-22T15:24:00,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:24:00,024 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:24:00,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:00,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:00,032 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122a8a67dd790ca4f8d804c14eabfef0921_661abb5eb8be4eaf4f236a86a23909c7 is 50, key is test_row_0/A:col10/1732289039896/Put/seqid=0 2024-11-22T15:24:00,042 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 661abb5eb8be4eaf4f236a86a23909c7#A#compaction#417 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:00,043 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/6cd9aed360c9469c96b6696af13a7b6f is 175, key is test_row_0/A:col10/1732289039248/Put/seqid=0 2024-11-22T15:24:00,050 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:00,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289100040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:00,055 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 661abb5eb8be4eaf4f236a86a23909c7#C#compaction#425 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:00,055 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/a2a81017e3464625afdd5453eff4b5ba is 50, key is test_row_0/C:col10/1732289039776/Put/seqid=0 2024-11-22T15:24:00,062 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:00,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289100044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:00,063 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:00,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289100044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:00,063 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:00,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289100047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:00,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742322_1498 (size=14994) 2024-11-22T15:24:00,082 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:00,087 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122a8a67dd790ca4f8d804c14eabfef0921_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122a8a67dd790ca4f8d804c14eabfef0921_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:00,089 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/233de41cb7324db2a57991b8a2d7cf80, store: [table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:24:00,090 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/233de41cb7324db2a57991b8a2d7cf80 is 175, key is test_row_0/A:col10/1732289039896/Put/seqid=0 2024-11-22T15:24:00,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742323_1499 (size=31393) 2024-11-22T15:24:00,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742324_1500 (size=12507) 2024-11-22T15:24:00,110 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/a2a81017e3464625afdd5453eff4b5ba as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/a2a81017e3464625afdd5453eff4b5ba 2024-11-22T15:24:00,114 INFO 
[RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in 661abb5eb8be4eaf4f236a86a23909c7/C of 661abb5eb8be4eaf4f236a86a23909c7 into a2a81017e3464625afdd5453eff4b5ba(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:24:00,114 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:24:00,114 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7., storeName=661abb5eb8be4eaf4f236a86a23909c7/C, priority=10, startTime=1732289039956; duration=0sec 2024-11-22T15:24:00,114 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-22T15:24:00,114 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 661abb5eb8be4eaf4f236a86a23909c7:C 2024-11-22T15:24:00,114 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 661abb5eb8be4eaf4f236a86a23909c7:B 2024-11-22T15:24:00,114 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 661abb5eb8be4eaf4f236a86a23909c7:C 2024-11-22T15:24:00,114 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:24:00,116 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37041 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:24:00,116 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): 661abb5eb8be4eaf4f236a86a23909c7/B is initiating minor compaction (all files) 2024-11-22T15:24:00,116 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 661abb5eb8be4eaf4f236a86a23909c7/B in TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
2024-11-22T15:24:00,116 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/a23be3e99d5f45178bfdef1868f340fb, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/61dbbab0d0c44878851f9d5cc2ad619e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/3f2ef7cf028a435b962a643c20f23124] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp, totalSize=36.2 K 2024-11-22T15:24:00,117 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting a23be3e99d5f45178bfdef1868f340fb, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1732289039042 2024-11-22T15:24:00,117 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 61dbbab0d0c44878851f9d5cc2ad619e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1732289039391 2024-11-22T15:24:00,117 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 3f2ef7cf028a435b962a643c20f23124, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=365, earliestPutTs=1732289039762 2024-11-22T15:24:00,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742325_1501 (size=39949) 2024-11-22T15:24:00,134 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=391, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/233de41cb7324db2a57991b8a2d7cf80 2024-11-22T15:24:00,141 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 661abb5eb8be4eaf4f236a86a23909c7#B#compaction#426 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:00,142 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/f43054983f4249a8b4e4f7d0aaca902e is 50, key is test_row_0/B:col10/1732289039776/Put/seqid=0 2024-11-22T15:24:00,154 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/2940e11d10434961bf8d5ca15a16d188 is 50, key is test_row_0/B:col10/1732289039896/Put/seqid=0 2024-11-22T15:24:00,163 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:00,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289100154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:00,167 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:00,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289100163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:00,168 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:00,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289100164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:00,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-22T15:24:00,174 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:00,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289100165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:00,175 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:00,175 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-22T15:24:00,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:24:00,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. as already flushing 2024-11-22T15:24:00,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:24:00,176 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:00,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:00,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:00,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742326_1502 (size=12541) 2024-11-22T15:24:00,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742327_1503 (size=12301) 2024-11-22T15:24:00,205 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=391 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/2940e11d10434961bf8d5ca15a16d188 2024-11-22T15:24:00,208 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/f43054983f4249a8b4e4f7d0aaca902e as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/f43054983f4249a8b4e4f7d0aaca902e 2024-11-22T15:24:00,215 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 661abb5eb8be4eaf4f236a86a23909c7/B of 661abb5eb8be4eaf4f236a86a23909c7 into f43054983f4249a8b4e4f7d0aaca902e(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:24:00,215 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:24:00,215 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7., storeName=661abb5eb8be4eaf4f236a86a23909c7/B, priority=13, startTime=1732289039956; duration=0sec 2024-11-22T15:24:00,215 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:24:00,215 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 661abb5eb8be4eaf4f236a86a23909c7:B 2024-11-22T15:24:00,215 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-22T15:24:00,216 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-22T15:24:00,216 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-22T15:24:00,216 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. because compaction request was cancelled 2024-11-22T15:24:00,216 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 661abb5eb8be4eaf4f236a86a23909c7:C 2024-11-22T15:24:00,219 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/04a966d642074fdd9c427450cbc12d80 is 50, key is test_row_0/C:col10/1732289039896/Put/seqid=0 2024-11-22T15:24:00,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742328_1504 (size=12301) 2024-11-22T15:24:00,268 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=391 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/04a966d642074fdd9c427450cbc12d80 2024-11-22T15:24:00,273 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/233de41cb7324db2a57991b8a2d7cf80 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/233de41cb7324db2a57991b8a2d7cf80 2024-11-22T15:24:00,277 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/233de41cb7324db2a57991b8a2d7cf80, entries=200, sequenceid=391, filesize=39.0 K 2024-11-22T15:24:00,278 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/2940e11d10434961bf8d5ca15a16d188 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/2940e11d10434961bf8d5ca15a16d188 2024-11-22T15:24:00,285 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/2940e11d10434961bf8d5ca15a16d188, entries=150, sequenceid=391, filesize=12.0 K 2024-11-22T15:24:00,285 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/04a966d642074fdd9c427450cbc12d80 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/04a966d642074fdd9c427450cbc12d80 2024-11-22T15:24:00,289 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/04a966d642074fdd9c427450cbc12d80, entries=150, sequenceid=391, filesize=12.0 K 2024-11-22T15:24:00,290 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 661abb5eb8be4eaf4f236a86a23909c7 in 276ms, sequenceid=391, compaction requested=true 2024-11-22T15:24:00,290 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:24:00,290 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 661abb5eb8be4eaf4f236a86a23909c7:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:24:00,290 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:24:00,290 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 661abb5eb8be4eaf4f236a86a23909c7:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:24:00,290 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-22T15:24:00,290 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 661abb5eb8be4eaf4f236a86a23909c7:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:24:00,290 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 
2024-11-22T15:24:00,290 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 7 store files, 4 compacting, 3 eligible, 16 blocking 2024-11-22T15:24:00,292 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 119843 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:24:00,292 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): 661abb5eb8be4eaf4f236a86a23909c7/A is initiating minor compaction 2024-11-22T15:24:00,292 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 661abb5eb8be4eaf4f236a86a23909c7/A in TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:24:00,293 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/a23f49d469964f188979661d80f613a1, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/3cfaa64de02842d9bf32160faabf71b4, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/233de41cb7324db2a57991b8a2d7cf80] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp, totalSize=117.0 K 2024-11-22T15:24:00,293 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=false priority=9 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:24:00,293 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
files: [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/a23f49d469964f188979661d80f613a1, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/3cfaa64de02842d9bf32160faabf71b4, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/233de41cb7324db2a57991b8a2d7cf80] 2024-11-22T15:24:00,293 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting a23f49d469964f188979661d80f613a1, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=353 2024-11-22T15:24:00,293 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 3cfaa64de02842d9bf32160faabf71b4, keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=365 2024-11-22T15:24:00,293 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 233de41cb7324db2a57991b8a2d7cf80, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=391 2024-11-22T15:24:00,307 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=false store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:24:00,323 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112264b74c9c051347bb98ee6126b49488ff_661abb5eb8be4eaf4f236a86a23909c7 store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:24:00,325 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=false file=d41d8cd98f00b204e9800998ecf8427e2024112264b74c9c051347bb98ee6126b49488ff_661abb5eb8be4eaf4f236a86a23909c7, store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:24:00,325 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112264b74c9c051347bb98ee6126b49488ff_661abb5eb8be4eaf4f236a86a23909c7 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:24:00,328 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:00,328 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-22T15:24:00,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
2024-11-22T15:24:00,329 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2837): Flushing 661abb5eb8be4eaf4f236a86a23909c7 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-22T15:24:00,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=A 2024-11-22T15:24:00,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:00,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=B 2024-11-22T15:24:00,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:00,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=C 2024-11-22T15:24:00,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:00,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411225ec2638df8824ab0a1015796b5508cf8_661abb5eb8be4eaf4f236a86a23909c7 is 50, key is test_row_0/A:col10/1732289040028/Put/seqid=0 2024-11-22T15:24:00,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on 661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:00,368 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. as already flushing 2024-11-22T15:24:00,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742329_1505 (size=4469) 2024-11-22T15:24:00,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742330_1506 (size=12454) 2024-11-22T15:24:00,447 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:00,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289100435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:00,449 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:00,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289100437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:00,449 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:00,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289100438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:00,459 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:00,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289100448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:00,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-22T15:24:00,497 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/6cd9aed360c9469c96b6696af13a7b6f as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/6cd9aed360c9469c96b6696af13a7b6f 2024-11-22T15:24:00,503 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 661abb5eb8be4eaf4f236a86a23909c7/A of 661abb5eb8be4eaf4f236a86a23909c7 into 6cd9aed360c9469c96b6696af13a7b6f(size=30.7 K), total size for store is 147.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:24:00,503 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:24:00,503 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7., storeName=661abb5eb8be4eaf4f236a86a23909c7/A, priority=12, startTime=1732289039534; duration=0sec 2024-11-22T15:24:00,503 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-22T15:24:00,503 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 661abb5eb8be4eaf4f236a86a23909c7:A 2024-11-22T15:24:00,503 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-22T15:24:00,505 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-22T15:24:00,505 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. 
Need 3 to initiate. 2024-11-22T15:24:00,505 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. because compaction request was cancelled 2024-11-22T15:24:00,505 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 661abb5eb8be4eaf4f236a86a23909c7:C 2024-11-22T15:24:00,505 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-22T15:24:00,506 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-22T15:24:00,506 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-22T15:24:00,506 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. because compaction request was cancelled 2024-11-22T15:24:00,506 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 661abb5eb8be4eaf4f236a86a23909c7:B 2024-11-22T15:24:00,551 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:00,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289100551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:00,559 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:00,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289100553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:00,559 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:00,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289100553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:00,569 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:00,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289100560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:00,642 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:00,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1732289100637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:00,643 DEBUG [Thread-1771 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8182 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7., hostname=77927f992d0b,36033,1732288915809, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T15:24:00,765 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:00,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289100760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:00,765 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:00,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289100760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:00,769 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:00,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289100762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:00,775 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:00,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289100771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:00,778 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 661abb5eb8be4eaf4f236a86a23909c7#A#compaction#429 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:00,779 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/2f53e62b138d431597122341bba435d2 is 175, key is test_row_0/A:col10/1732289039896/Put/seqid=0 2024-11-22T15:24:00,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742331_1507 (size=31358) 2024-11-22T15:24:00,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:00,811 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/2f53e62b138d431597122341bba435d2 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/2f53e62b138d431597122341bba435d2 2024-11-22T15:24:00,820 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411225ec2638df8824ab0a1015796b5508cf8_661abb5eb8be4eaf4f236a86a23909c7 to 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411225ec2638df8824ab0a1015796b5508cf8_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:00,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/1e7cd6fff05e483bbef72706cb4e279d, store: [table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:24:00,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/1e7cd6fff05e483bbef72706cb4e279d is 175, key is test_row_0/A:col10/1732289040028/Put/seqid=0 2024-11-22T15:24:00,828 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 file(s) in 661abb5eb8be4eaf4f236a86a23909c7/A of 661abb5eb8be4eaf4f236a86a23909c7 into 2f53e62b138d431597122341bba435d2(size=30.6 K), total size for store is 61.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:24:00,828 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:24:00,828 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7., storeName=661abb5eb8be4eaf4f236a86a23909c7/A, priority=9, startTime=1732289040290; duration=0sec 2024-11-22T15:24:00,828 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:00,828 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 661abb5eb8be4eaf4f236a86a23909c7:A 2024-11-22T15:24:00,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742332_1508 (size=31255) 2024-11-22T15:24:00,871 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=404, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/1e7cd6fff05e483bbef72706cb4e279d 2024-11-22T15:24:00,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/77c1fe67c3a242cb885f52f8ebe47a4b is 50, key is test_row_0/B:col10/1732289040028/Put/seqid=0 2024-11-22T15:24:00,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to 
blk_1073742333_1509 (size=12301) 2024-11-22T15:24:00,934 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=404 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/77c1fe67c3a242cb885f52f8ebe47a4b 2024-11-22T15:24:00,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/ff090d3dc6a4433e84727788ee6b061d is 50, key is test_row_0/C:col10/1732289040028/Put/seqid=0 2024-11-22T15:24:00,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742334_1510 (size=12301) 2024-11-22T15:24:00,969 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=404 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/ff090d3dc6a4433e84727788ee6b061d 2024-11-22T15:24:00,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/1e7cd6fff05e483bbef72706cb4e279d as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/1e7cd6fff05e483bbef72706cb4e279d 2024-11-22T15:24:00,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-22T15:24:00,981 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/1e7cd6fff05e483bbef72706cb4e279d, entries=150, sequenceid=404, filesize=30.5 K 2024-11-22T15:24:00,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/77c1fe67c3a242cb885f52f8ebe47a4b as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/77c1fe67c3a242cb885f52f8ebe47a4b 2024-11-22T15:24:00,988 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/77c1fe67c3a242cb885f52f8ebe47a4b, entries=150, sequenceid=404, filesize=12.0 K 
2024-11-22T15:24:00,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/ff090d3dc6a4433e84727788ee6b061d as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/ff090d3dc6a4433e84727788ee6b061d 2024-11-22T15:24:00,995 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/ff090d3dc6a4433e84727788ee6b061d, entries=150, sequenceid=404, filesize=12.0 K 2024-11-22T15:24:00,995 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 661abb5eb8be4eaf4f236a86a23909c7 in 666ms, sequenceid=404, compaction requested=true 2024-11-22T15:24:00,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2538): Flush status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:24:00,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:24:00,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=115 2024-11-22T15:24:00,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=115 2024-11-22T15:24:00,998 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=115, resume processing ppid=114 2024-11-22T15:24:00,998 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, ppid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1240 sec 2024-11-22T15:24:00,999 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees in 1.1280 sec 2024-11-22T15:24:01,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on 661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:01,074 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 661abb5eb8be4eaf4f236a86a23909c7 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-22T15:24:01,074 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=A 2024-11-22T15:24:01,074 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:01,074 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=B 2024-11-22T15:24:01,074 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): 
Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:01,074 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=C 2024-11-22T15:24:01,074 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:01,089 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:01,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289101086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:01,094 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122d1390f31d68043d98c34bcf76d7a018f_661abb5eb8be4eaf4f236a86a23909c7 is 50, key is test_row_0/A:col10/1732289040436/Put/seqid=0 2024-11-22T15:24:01,096 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:01,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289101086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:01,101 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:01,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289101089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:01,101 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:01,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289101094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:01,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742335_1511 (size=14994) 2024-11-22T15:24:01,198 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:01,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289101190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:01,206 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:01,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289101198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:01,208 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:01,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289101203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:01,211 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:01,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289101204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:01,403 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:01,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289101400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:01,412 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:01,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289101409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:01,415 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:01,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289101411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:01,420 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:01,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289101412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:01,534 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:01,540 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122d1390f31d68043d98c34bcf76d7a018f_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122d1390f31d68043d98c34bcf76d7a018f_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:01,541 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/458bbea260be4b58acafec26ffe2a1f8, store: [table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:24:01,541 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/458bbea260be4b58acafec26ffe2a1f8 is 175, key is test_row_0/A:col10/1732289040436/Put/seqid=0 2024-11-22T15:24:01,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742336_1512 (size=39949) 2024-11-22T15:24:01,589 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=432, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/458bbea260be4b58acafec26ffe2a1f8 2024-11-22T15:24:01,616 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/5ca96a0d37444d0eb79e1a6e1a886b63 is 50, key is test_row_0/B:col10/1732289040436/Put/seqid=0 2024-11-22T15:24:01,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742337_1513 (size=12301) 2024-11-22T15:24:01,685 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=432 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/5ca96a0d37444d0eb79e1a6e1a886b63 2024-11-22T15:24:01,696 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/e2634ac29a384073ae2352d4a27137d2 is 50, key is test_row_0/C:col10/1732289040436/Put/seqid=0 2024-11-22T15:24:01,709 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:01,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289101705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:01,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742338_1514 (size=12301) 2024-11-22T15:24:01,720 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:01,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289101714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:01,720 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=432 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/e2634ac29a384073ae2352d4a27137d2 2024-11-22T15:24:01,724 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:01,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289101717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:01,728 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/458bbea260be4b58acafec26ffe2a1f8 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/458bbea260be4b58acafec26ffe2a1f8 2024-11-22T15:24:01,731 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:01,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289101724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:01,738 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/458bbea260be4b58acafec26ffe2a1f8, entries=200, sequenceid=432, filesize=39.0 K 2024-11-22T15:24:01,739 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/5ca96a0d37444d0eb79e1a6e1a886b63 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/5ca96a0d37444d0eb79e1a6e1a886b63 2024-11-22T15:24:01,754 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/5ca96a0d37444d0eb79e1a6e1a886b63, entries=150, sequenceid=432, filesize=12.0 K 2024-11-22T15:24:01,756 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/e2634ac29a384073ae2352d4a27137d2 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/e2634ac29a384073ae2352d4a27137d2 2024-11-22T15:24:01,759 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/e2634ac29a384073ae2352d4a27137d2, entries=150, sequenceid=432, filesize=12.0 K 2024-11-22T15:24:01,760 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 661abb5eb8be4eaf4f236a86a23909c7 in 686ms, sequenceid=432, compaction requested=true 2024-11-22T15:24:01,760 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:24:01,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 661abb5eb8be4eaf4f236a86a23909c7:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:24:01,760 DEBUG 
[RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T15:24:01,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:01,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 661abb5eb8be4eaf4f236a86a23909c7:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:24:01,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:24:01,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 661abb5eb8be4eaf4f236a86a23909c7:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:24:01,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-22T15:24:01,760 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T15:24:01,762 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 133955 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T15:24:01,762 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): 661abb5eb8be4eaf4f236a86a23909c7/A is initiating minor compaction (all files) 2024-11-22T15:24:01,762 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 661abb5eb8be4eaf4f236a86a23909c7/A in TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:24:01,762 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/6cd9aed360c9469c96b6696af13a7b6f, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/2f53e62b138d431597122341bba435d2, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/1e7cd6fff05e483bbef72706cb4e279d, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/458bbea260be4b58acafec26ffe2a1f8] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp, totalSize=130.8 K 2024-11-22T15:24:01,762 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
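The repeated RegionTooBusyException entries above are thrown from HRegion.checkResources(), which rejects writes once the region's memstore passes its blocking limit, i.e. hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. A minimal sketch of those two settings follows, assuming the 512.0 K limit seen in this log comes from a 128 KB test flush size and a multiplier of 4 (assumed values, not confirmed by the log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed test value: flush a region memstore once it reaches 128 KB.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    // Writes are rejected with RegionTooBusyException once the memstore
    // reaches flush size times this multiplier.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
    System.out.println("blocking limit = " + blockingLimit + " bytes"); // 524288 = 512 K
  }
}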
2024-11-22T15:24:01,762 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. files: [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/6cd9aed360c9469c96b6696af13a7b6f, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/2f53e62b138d431597122341bba435d2, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/1e7cd6fff05e483bbef72706cb4e279d, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/458bbea260be4b58acafec26ffe2a1f8] 2024-11-22T15:24:01,763 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49444 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T15:24:01,763 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): 661abb5eb8be4eaf4f236a86a23909c7/B is initiating minor compaction (all files) 2024-11-22T15:24:01,763 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 661abb5eb8be4eaf4f236a86a23909c7/B in TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:24:01,763 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/f43054983f4249a8b4e4f7d0aaca902e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/2940e11d10434961bf8d5ca15a16d188, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/77c1fe67c3a242cb885f52f8ebe47a4b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/5ca96a0d37444d0eb79e1a6e1a886b63] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp, totalSize=48.3 K 2024-11-22T15:24:01,764 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting f43054983f4249a8b4e4f7d0aaca902e, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=365, earliestPutTs=1732289039762 2024-11-22T15:24:01,765 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6cd9aed360c9469c96b6696af13a7b6f, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1732289039042 2024-11-22T15:24:01,765 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 2940e11d10434961bf8d5ca15a16d188, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=391, earliestPutTs=1732289039896 2024-11-22T15:24:01,765 DEBUG 
[RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 77c1fe67c3a242cb885f52f8ebe47a4b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=404, earliestPutTs=1732289040028 2024-11-22T15:24:01,765 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2f53e62b138d431597122341bba435d2, keycount=150, bloomtype=ROW, size=30.6 K, encoding=NONE, compression=NONE, seqNum=391, earliestPutTs=1732289039896 2024-11-22T15:24:01,765 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 5ca96a0d37444d0eb79e1a6e1a886b63, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=432, earliestPutTs=1732289040436 2024-11-22T15:24:01,765 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1e7cd6fff05e483bbef72706cb4e279d, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=404, earliestPutTs=1732289040028 2024-11-22T15:24:01,765 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 458bbea260be4b58acafec26ffe2a1f8, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=432, earliestPutTs=1732289040421 2024-11-22T15:24:01,794 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 661abb5eb8be4eaf4f236a86a23909c7#B#compaction#436 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:01,794 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/db0bdc6609ec4d41bae2716729dfde3c is 50, key is test_row_0/B:col10/1732289040436/Put/seqid=0 2024-11-22T15:24:01,798 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:24:01,808 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411227d6af54220df4970b1aba5f7967f72db_661abb5eb8be4eaf4f236a86a23909c7 store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:24:01,810 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411227d6af54220df4970b1aba5f7967f72db_661abb5eb8be4eaf4f236a86a23909c7, store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:24:01,810 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411227d6af54220df4970b1aba5f7967f72db_661abb5eb8be4eaf4f236a86a23909c7 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:24:01,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:42059 is added to blk_1073742339_1515 (size=12677) 2024-11-22T15:24:01,845 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/db0bdc6609ec4d41bae2716729dfde3c as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/db0bdc6609ec4d41bae2716729dfde3c 2024-11-22T15:24:01,851 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 661abb5eb8be4eaf4f236a86a23909c7/B of 661abb5eb8be4eaf4f236a86a23909c7 into db0bdc6609ec4d41bae2716729dfde3c(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:24:01,851 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:24:01,851 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7., storeName=661abb5eb8be4eaf4f236a86a23909c7/B, priority=12, startTime=1732289041760; duration=0sec 2024-11-22T15:24:01,851 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:24:01,851 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 661abb5eb8be4eaf4f236a86a23909c7:B 2024-11-22T15:24:01,851 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T15:24:01,852 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49410 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T15:24:01,853 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): 661abb5eb8be4eaf4f236a86a23909c7/C is initiating minor compaction (all files) 2024-11-22T15:24:01,853 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 661abb5eb8be4eaf4f236a86a23909c7/C in TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
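The Mutate calls rejected throughout this log are ordinary client puts against the TestAcidGuarantees table. A rough sketch of what such a writer looks like, reusing the table, row, and column-family names from the log; whether the raw RegionTooBusyException surfaces to application code depends on client retry settings, and the retry count and backoff below are arbitrary illustration values:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;                 // arbitrary starting backoff
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);                   // may be rejected while the memstore is over its limit
          break;
        } catch (RegionTooBusyException e) {
          if (attempt >= 5) {               // arbitrary cap
            throw e;
          }
          Thread.sleep(backoffMs);          // give the flush time to drain the memstore
          backoffMs *= 2;
        }
      }
    }
  }
}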
2024-11-22T15:24:01,853 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/a2a81017e3464625afdd5453eff4b5ba, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/04a966d642074fdd9c427450cbc12d80, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/ff090d3dc6a4433e84727788ee6b061d, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/e2634ac29a384073ae2352d4a27137d2] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp, totalSize=48.3 K 2024-11-22T15:24:01,854 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting a2a81017e3464625afdd5453eff4b5ba, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=365, earliestPutTs=1732289039762 2024-11-22T15:24:01,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742340_1516 (size=4469) 2024-11-22T15:24:01,855 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 04a966d642074fdd9c427450cbc12d80, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=391, earliestPutTs=1732289039896 2024-11-22T15:24:01,855 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting ff090d3dc6a4433e84727788ee6b061d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=404, earliestPutTs=1732289040028 2024-11-22T15:24:01,856 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 661abb5eb8be4eaf4f236a86a23909c7#A#compaction#437 average throughput is 0.42 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:01,856 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting e2634ac29a384073ae2352d4a27137d2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=432, earliestPutTs=1732289040436 2024-11-22T15:24:01,856 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/97c4733754f04aae81dab27c3bac1362 is 175, key is test_row_0/A:col10/1732289040436/Put/seqid=0 2024-11-22T15:24:01,881 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 661abb5eb8be4eaf4f236a86a23909c7#C#compaction#438 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:01,882 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/21965030d1e84588a21d3b44061ebb76 is 50, key is test_row_0/C:col10/1732289040436/Put/seqid=0 2024-11-22T15:24:01,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742341_1517 (size=31631) 2024-11-22T15:24:01,904 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/97c4733754f04aae81dab27c3bac1362 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/97c4733754f04aae81dab27c3bac1362 2024-11-22T15:24:01,908 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 661abb5eb8be4eaf4f236a86a23909c7/A of 661abb5eb8be4eaf4f236a86a23909c7 into 97c4733754f04aae81dab27c3bac1362(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:24:01,909 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:24:01,909 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7., storeName=661abb5eb8be4eaf4f236a86a23909c7/A, priority=12, startTime=1732289041760; duration=0sec 2024-11-22T15:24:01,909 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:01,909 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 661abb5eb8be4eaf4f236a86a23909c7:A 2024-11-22T15:24:01,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742342_1518 (size=12643) 2024-11-22T15:24:01,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-22T15:24:01,976 INFO [Thread-1775 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 114 completed 2024-11-22T15:24:01,978 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:24:01,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees 2024-11-22T15:24:01,979 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, 
table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:24:01,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-22T15:24:01,980 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:24:01,980 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:24:02,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-22T15:24:02,131 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:02,132 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-22T15:24:02,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:24:02,132 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2837): Flushing 661abb5eb8be4eaf4f236a86a23909c7 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-22T15:24:02,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=A 2024-11-22T15:24:02,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:02,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=B 2024-11-22T15:24:02,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:02,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=C 2024-11-22T15:24:02,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:02,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122b0501c3a2a50431ba4e9b1ac30dca7d0_661abb5eb8be4eaf4f236a86a23909c7 is 50, key is test_row_0/A:col10/1732289041085/Put/seqid=0 2024-11-22T15:24:02,196 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742343_1519 (size=12454) 2024-11-22T15:24:02,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:02,201 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122b0501c3a2a50431ba4e9b1ac30dca7d0_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122b0501c3a2a50431ba4e9b1ac30dca7d0_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:02,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/6ee6a9f89af34077a871c52748d25129, store: [table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:24:02,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/6ee6a9f89af34077a871c52748d25129 is 175, key is test_row_0/A:col10/1732289041085/Put/seqid=0 2024-11-22T15:24:02,215 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
as already flushing 2024-11-22T15:24:02,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on 661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:02,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742344_1520 (size=31255) 2024-11-22T15:24:02,242 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=444, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/6ee6a9f89af34077a871c52748d25129 2024-11-22T15:24:02,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/9219b88c568e43f9989da3e397528add is 50, key is test_row_0/B:col10/1732289041085/Put/seqid=0 2024-11-22T15:24:02,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742345_1521 (size=12301) 2024-11-22T15:24:02,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-22T15:24:02,282 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:02,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289102273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:02,283 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:02,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289102282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:02,286 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=444 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/9219b88c568e43f9989da3e397528add 2024-11-22T15:24:02,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/7fdd253d3fe0461a8cdf2ea381d5b4b1 is 50, key is test_row_0/C:col10/1732289041085/Put/seqid=0 2024-11-22T15:24:02,292 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:02,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289102282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:02,292 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:02,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289102283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:02,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742346_1522 (size=12301) 2024-11-22T15:24:02,335 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=444 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/7fdd253d3fe0461a8cdf2ea381d5b4b1 2024-11-22T15:24:02,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/6ee6a9f89af34077a871c52748d25129 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/6ee6a9f89af34077a871c52748d25129 2024-11-22T15:24:02,344 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/21965030d1e84588a21d3b44061ebb76 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/21965030d1e84588a21d3b44061ebb76 2024-11-22T15:24:02,344 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/6ee6a9f89af34077a871c52748d25129, entries=150, sequenceid=444, filesize=30.5 K 2024-11-22T15:24:02,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/9219b88c568e43f9989da3e397528add as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/9219b88c568e43f9989da3e397528add 
2024-11-22T15:24:02,350 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 661abb5eb8be4eaf4f236a86a23909c7/C of 661abb5eb8be4eaf4f236a86a23909c7 into 21965030d1e84588a21d3b44061ebb76(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:24:02,350 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/9219b88c568e43f9989da3e397528add, entries=150, sequenceid=444, filesize=12.0 K 2024-11-22T15:24:02,350 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:24:02,350 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7., storeName=661abb5eb8be4eaf4f236a86a23909c7/C, priority=12, startTime=1732289041760; duration=0sec 2024-11-22T15:24:02,350 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:02,350 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 661abb5eb8be4eaf4f236a86a23909c7:C 2024-11-22T15:24:02,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/7fdd253d3fe0461a8cdf2ea381d5b4b1 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/7fdd253d3fe0461a8cdf2ea381d5b4b1 2024-11-22T15:24:02,353 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/7fdd253d3fe0461a8cdf2ea381d5b4b1, entries=150, sequenceid=444, filesize=12.0 K 2024-11-22T15:24:02,354 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 661abb5eb8be4eaf4f236a86a23909c7 in 222ms, sequenceid=444, compaction requested=false 2024-11-22T15:24:02,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2538): Flush status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:24:02,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
2024-11-22T15:24:02,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=117 2024-11-22T15:24:02,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=117 2024-11-22T15:24:02,358 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116 2024-11-22T15:24:02,358 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 375 msec 2024-11-22T15:24:02,359 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees in 381 msec 2024-11-22T15:24:02,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on 661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:02,387 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 661abb5eb8be4eaf4f236a86a23909c7 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-22T15:24:02,387 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=A 2024-11-22T15:24:02,387 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:02,387 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=B 2024-11-22T15:24:02,387 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:02,387 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=C 2024-11-22T15:24:02,387 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:02,401 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:02,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289102393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:02,403 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:02,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289102399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:02,406 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:02,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289102400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:02,408 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:02,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289102401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:02,411 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122b7139ff281804174a1d20f425a4c37de_661abb5eb8be4eaf4f236a86a23909c7 is 50, key is test_row_0/A:col10/1732289042386/Put/seqid=0 2024-11-22T15:24:02,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742347_1523 (size=14994) 2024-11-22T15:24:02,467 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:02,472 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122b7139ff281804174a1d20f425a4c37de_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122b7139ff281804174a1d20f425a4c37de_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:02,473 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/0269cdd28ade4d38a9517b25c7ee46a8, store: [table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:24:02,473 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/0269cdd28ade4d38a9517b25c7ee46a8 is 175, key is test_row_0/A:col10/1732289042386/Put/seqid=0 2024-11-22T15:24:02,508 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:02,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289102504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:02,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742348_1524 (size=39949) 2024-11-22T15:24:02,513 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:02,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289102509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:02,517 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:02,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289102510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:02,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-22T15:24:02,581 INFO [Thread-1775 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 116 completed 2024-11-22T15:24:02,582 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:24:02,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees 2024-11-22T15:24:02,584 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:24:02,584 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:24:02,584 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=118, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:24:02,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-22T15:24:02,606 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:02,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289102603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:02,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-22T15:24:02,710 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:02,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289102710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:02,718 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:02,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289102715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:02,723 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:02,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289102718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:02,737 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:02,738 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-22T15:24:02,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:24:02,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. as already flushing 2024-11-22T15:24:02,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:24:02,738 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:24:02,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:02,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:02,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-22T15:24:02,891 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:02,892 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-22T15:24:02,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:24:02,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. as already flushing 2024-11-22T15:24:02,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:24:02,892 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:02,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:02,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:24:02,913 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=472, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/0269cdd28ade4d38a9517b25c7ee46a8 2024-11-22T15:24:02,914 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:02,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289102910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:02,923 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/0c9096b2eff4484d8c88187ac9fce95c is 50, key is test_row_0/B:col10/1732289042386/Put/seqid=0 2024-11-22T15:24:02,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742349_1525 (size=12301) 2024-11-22T15:24:02,945 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=472 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/0c9096b2eff4484d8c88187ac9fce95c 2024-11-22T15:24:02,964 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/ef33ae7a8e644604b6177e981580a594 is 50, key is test_row_0/C:col10/1732289042386/Put/seqid=0 2024-11-22T15:24:03,001 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742350_1526 (size=12301) 2024-11-22T15:24:03,017 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:03,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289103013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:03,025 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:03,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289103021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:03,028 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:03,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289103026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:03,043 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:03,043 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-22T15:24:03,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
2024-11-22T15:24:03,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. as already flushing 2024-11-22T15:24:03,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:24:03,044 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:03,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:24:03,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:03,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-22T15:24:03,196 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:03,196 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-22T15:24:03,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
2024-11-22T15:24:03,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. as already flushing 2024-11-22T15:24:03,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:24:03,196 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:03,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:24:03,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:03,348 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:03,349 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-22T15:24:03,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:24:03,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
as already flushing 2024-11-22T15:24:03,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:24:03,349 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:03,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:03,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:03,402 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=472 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/ef33ae7a8e644604b6177e981580a594 2024-11-22T15:24:03,406 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/0269cdd28ade4d38a9517b25c7ee46a8 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/0269cdd28ade4d38a9517b25c7ee46a8 2024-11-22T15:24:03,410 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/0269cdd28ade4d38a9517b25c7ee46a8, entries=200, sequenceid=472, filesize=39.0 K 2024-11-22T15:24:03,411 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/0c9096b2eff4484d8c88187ac9fce95c as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/0c9096b2eff4484d8c88187ac9fce95c 2024-11-22T15:24:03,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,414 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/0c9096b2eff4484d8c88187ac9fce95c, entries=150, sequenceid=472, filesize=12.0 K 2024-11-22T15:24:03,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,421 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/ef33ae7a8e644604b6177e981580a594 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/ef33ae7a8e644604b6177e981580a594 2024-11-22T15:24:03,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,424 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/ef33ae7a8e644604b6177e981580a594, entries=150, sequenceid=472, filesize=12.0 K 2024-11-22T15:24:03,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,425 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,425 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 661abb5eb8be4eaf4f236a86a23909c7 in 1038ms, sequenceid=472, compaction requested=true 2024-11-22T15:24:03,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,426 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:24:03,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,426 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 661abb5eb8be4eaf4f236a86a23909c7:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:24:03,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,426 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:03,426 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:24:03,426 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:24:03,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,426 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,427 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102835 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:24:03,427 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): 661abb5eb8be4eaf4f236a86a23909c7/A is initiating minor compaction (all files) 2024-11-22T15:24:03,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,427 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37279 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:24:03,427 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 661abb5eb8be4eaf4f236a86a23909c7/A in TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:24:03,427 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): 661abb5eb8be4eaf4f236a86a23909c7/B is initiating minor compaction (all files) 2024-11-22T15:24:03,427 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 661abb5eb8be4eaf4f236a86a23909c7/B in TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:24:03,427 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/97c4733754f04aae81dab27c3bac1362, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/6ee6a9f89af34077a871c52748d25129, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/0269cdd28ade4d38a9517b25c7ee46a8] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp, totalSize=100.4 K 2024-11-22T15:24:03,427 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
2024-11-22T15:24:03,427 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/db0bdc6609ec4d41bae2716729dfde3c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/9219b88c568e43f9989da3e397528add, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/0c9096b2eff4484d8c88187ac9fce95c] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp, totalSize=36.4 K 2024-11-22T15:24:03,427 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. files: [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/97c4733754f04aae81dab27c3bac1362, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/6ee6a9f89af34077a871c52748d25129, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/0269cdd28ade4d38a9517b25c7ee46a8] 2024-11-22T15:24:03,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,428 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 97c4733754f04aae81dab27c3bac1362, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=432, earliestPutTs=1732289040436 2024-11-22T15:24:03,428 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting db0bdc6609ec4d41bae2716729dfde3c, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=432, earliestPutTs=1732289040436 2024-11-22T15:24:03,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,428 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 9219b88c568e43f9989da3e397528add, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=444, earliestPutTs=1732289041085 2024-11-22T15:24:03,428 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6ee6a9f89af34077a871c52748d25129, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=444, earliestPutTs=1732289041085 2024-11-22T15:24:03,428 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 0c9096b2eff4484d8c88187ac9fce95c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=472, earliestPutTs=1732289042256 2024-11-22T15:24:03,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,429 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0269cdd28ade4d38a9517b25c7ee46a8, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=472, earliestPutTs=1732289042256 2024-11-22T15:24:03,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 661abb5eb8be4eaf4f236a86a23909c7:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:24:03,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:03,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 661abb5eb8be4eaf4f236a86a23909c7:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:24:03,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:24:03,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,439 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:24:03,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-22T15:24:03,458 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 661abb5eb8be4eaf4f236a86a23909c7#B#compaction#446 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second
2024-11-22T15:24:03,458 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/28c02be9dc7a46358ab90bd431613449 is 50, key is test_row_0/B:col10/1732289042386/Put/seqid=0
2024-11-22T15:24:03,459 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241122a12d54b3ba1a4795926e0ce4849db668_661abb5eb8be4eaf4f236a86a23909c7 store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7]
2024-11-22T15:24:03,461 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241122a12d54b3ba1a4795926e0ce4849db668_661abb5eb8be4eaf4f236a86a23909c7, store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7]
2024-11-22T15:24:03,461 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122a12d54b3ba1a4795926e0ce4849db668_661abb5eb8be4eaf4f236a86a23909c7 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7]
2024-11-22T15:24:03,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742351_1527 (size=12779)
2024-11-22T15:24:03,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742352_1528 (size=4469)
2024-11-22T15:24:03,483 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 661abb5eb8be4eaf4f236a86a23909c7#A#compaction#445 average throughput is 0.56 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-22T15:24:03,484 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/ce4cec3040354d84ac5ceb6ac99ac727 is 175, key is test_row_0/A:col10/1732289042386/Put/seqid=0
2024-11-22T15:24:03,484 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/28c02be9dc7a46358ab90bd431613449 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/28c02be9dc7a46358ab90bd431613449
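The PressureAwareThroughputController entries above report each compaction's average write rate and the shared budget (total limit is 50.00 MB/second) it is throttled against. Purely as a rough, hypothetical illustration of that style of accounting, and not the actual HBase class, the Java sketch below tracks bytes written since an operation started, derives the average throughput the log line would print, and sleeps when a per-operation share of an assumed total limit is exceeded; the sharing policy and all names here are assumptions.

    // Hypothetical sketch of pressure-aware write throttling; not the HBase implementation.
    public class SimpleThroughputLimiter {
        private final double totalLimitBytesPerSec; // shared budget, e.g. 50 MB/s
        private final long startNanos = System.nanoTime();
        private long bytesWritten = 0;

        public SimpleThroughputLimiter(double totalLimitBytesPerSec) {
            this.totalLimitBytesPerSec = totalLimitBytesPerSec;
        }

        // Record progress and sleep if this operation's share of the budget is exceeded.
        // activeOperations is how many operations currently share the total limit (assumed policy).
        public void control(long newBytes, int activeOperations) throws InterruptedException {
            bytesWritten += newBytes;
            double perOpLimit = totalLimitBytesPerSec / Math.max(1, activeOperations);
            double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
            double minSec = bytesWritten / perOpLimit; // time this much data should take at the cap
            if (minSec > elapsedSec) {
                Thread.sleep((long) ((minSec - elapsedSec) * 1000.0));
            }
        }

        // Average throughput in MB/second, the figure the log line reports.
        public double averageThroughputMBps() {
            double elapsedSec = Math.max(1e-9, (System.nanoTime() - startNanos) / 1e9);
            return bytesWritten / elapsedSec / (1024.0 * 1024.0);
        }

        public static void main(String[] args) throws InterruptedException {
            SimpleThroughputLimiter limiter = new SimpleThroughputLimiter(50.0 * 1024 * 1024);
            limiter.control(6 * 1024 * 1024, 1); // e.g. ~6 MB written by a single compaction
            System.out.printf("average throughput is %.2f MB/second%n", limiter.averageThroughputMBps());
        }
    }

A caller would invoke control(n, active) after each chunk of n bytes is written and read averageThroughputMBps() when the compaction finishes, which is roughly the shape of the figures logged above.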
2024-11-22T15:24:03,490 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 661abb5eb8be4eaf4f236a86a23909c7/B of 661abb5eb8be4eaf4f236a86a23909c7 into 28c02be9dc7a46358ab90bd431613449(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-22T15:24:03,490 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 661abb5eb8be4eaf4f236a86a23909c7:
2024-11-22T15:24:03,490 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7., storeName=661abb5eb8be4eaf4f236a86a23909c7/B, priority=13, startTime=1732289043426; duration=0sec
2024-11-22T15:24:03,490 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-22T15:24:03,490 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 661abb5eb8be4eaf4f236a86a23909c7:B
2024-11-22T15:24:03,490 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-22T15:24:03,491 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37245 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-22T15:24:03,491 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): 661abb5eb8be4eaf4f236a86a23909c7/C is initiating minor compaction (all files)
2024-11-22T15:24:03,491 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 661abb5eb8be4eaf4f236a86a23909c7/C in TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.
2024-11-22T15:24:03,491 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/21965030d1e84588a21d3b44061ebb76, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/7fdd253d3fe0461a8cdf2ea381d5b4b1, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/ef33ae7a8e644604b6177e981580a594] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp, totalSize=36.4 K
2024-11-22T15:24:03,492 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 21965030d1e84588a21d3b44061ebb76, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=432, earliestPutTs=1732289040436
2024-11-22T15:24:03,492 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 7fdd253d3fe0461a8cdf2ea381d5b4b1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=444, earliestPutTs=1732289041085
2024-11-22T15:24:03,492 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting ef33ae7a8e644604b6177e981580a594, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=472, earliestPutTs=1732289042256
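The ExploringCompactionPolicy entry above reports a selection of 3 files totalling 37245 bytes, which is the totalSize=36.4 K shown in the HStore line (37245 / 1024 is about 36.4), and notes that the selection was "in ratio". As a generic, hedged illustration of such a file-size ratio test, and not a claim about the exact HBase check, the small Java snippet below accepts a set of files when no single file is larger than ratio times the combined size of the others; the 1.2 ratio, the helper name, and the individual byte counts are assumptions, with only the 37245-byte total taken from the log.

    import java.util.List;

    // Illustrative size-ratio check for compaction selection; parameters are assumed.
    public final class CompactionRatioCheck {
        // True if every file is no larger than ratio times the sum of the other files.
        static boolean filesInRatio(List<Long> fileSizes, double ratio) {
            long total = fileSizes.stream().mapToLong(Long::longValue).sum();
            for (long size : fileSizes) {
                if (size > (total - size) * ratio) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // Made-up per-file sizes that sum to the 37245 bytes reported in the log.
            List<Long> sizes = List.of(12_595L, 12_325L, 12_325L);
            long total = sizes.stream().mapToLong(Long::longValue).sum();
            System.out.println("total=" + total + " bytes (~" + total / 1024.0 + " K), inRatio="
                + filesInRatio(sizes, 1.2));
        }
    }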
2024-11-22T15:24:03,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742353_1529 (size=31733)
2024-11-22T15:24:03,501 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809
2024-11-22T15:24:03,502 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119
2024-11-22T15:24:03,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.
2024-11-22T15:24:03,502 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2837): Flushing 661abb5eb8be4eaf4f236a86a23909c7 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB
2024-11-22T15:24:03,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=A
2024-11-22T15:24:03,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-22T15:24:03,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=B
2024-11-22T15:24:03,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-22T15:24:03,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=C
2024-11-22T15:24:03,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-22T15:24:03,504 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 661abb5eb8be4eaf4f236a86a23909c7#C#compaction#447 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-22T15:24:03,505 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/e5f9a250ed3f4160b3d01584d2d7ceb5 is 50, key is test_row_0/C:col10/1732289042386/Put/seqid=0
2024-11-22T15:24:03,505 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/ce4cec3040354d84ac5ceb6ac99ac727 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/ce4cec3040354d84ac5ceb6ac99ac727
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,510 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,512 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 661abb5eb8be4eaf4f236a86a23909c7/A of 661abb5eb8be4eaf4f236a86a23909c7 into ce4cec3040354d84ac5ceb6ac99ac727(size=31.0 K), total size for store is 31.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
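The shortCompactions thread above has just rewritten all three files of store A into ce4cec3040354d84ac5ceb6ac99ac727. A compaction like this can also be requested explicitly through the Admin API; a minimal sketch (table and family names taken from this log, connection setup assumed):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CompactFamilyASketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Queue a major compaction of family A; the server runs it on the
          // CompactSplit pool, as in the CompactionRunner entries of this log.
          admin.majorCompact(TableName.valueOf("TestAcidGuarantees"), Bytes.toBytes("A"));
        }
      }
    }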
2024-11-22T15:24:03,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,512 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:24:03,512 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7., storeName=661abb5eb8be4eaf4f236a86a23909c7/A, priority=13, startTime=1732289043426; duration=0sec 2024-11-22T15:24:03,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,513 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:03,513 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 661abb5eb8be4eaf4f236a86a23909c7:A 2024-11-22T15:24:03,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on 661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:03,515 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
as already flushing 2024-11-22T15:24:03,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742354_1530 (size=12745) 2024-11-22T15:24:03,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122cdaa534810b34242b870f5db8eb8ce28_661abb5eb8be4eaf4f236a86a23909c7 is 50, key is test_row_0/A:col10/1732289042398/Put/seqid=0 2024-11-22T15:24:03,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742355_1531 (size=9914) 2024-11-22T15:24:03,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:03,574 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122cdaa534810b34242b870f5db8eb8ce28_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122cdaa534810b34242b870f5db8eb8ce28_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:03,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/328e8a168ed24f02bbb5f2d02f2d397b, store: [table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:24:03,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/328e8a168ed24f02bbb5f2d02f2d397b is 175, key is test_row_0/A:col10/1732289042398/Put/seqid=0 2024-11-22T15:24:03,585 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:03,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289103576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:03,586 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:03,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289103577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:03,588 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:03,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289103579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:03,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742356_1532 (size=22561) 2024-11-22T15:24:03,592 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=483, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/328e8a168ed24f02bbb5f2d02f2d397b 2024-11-22T15:24:03,597 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:03,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289103585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:03,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/679353d8dd56495081df9f1bcd11dc41 is 50, key is test_row_0/B:col10/1732289042398/Put/seqid=0 2024-11-22T15:24:03,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742357_1533 (size=9857) 2024-11-22T15:24:03,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-22T15:24:03,697 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:03,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289103688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:03,698 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:03,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289103688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:03,699 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:03,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289103689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:03,708 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:03,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289103699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:03,905 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:03,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289103899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:03,906 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:03,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289103900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:03,906 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:03,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289103900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:03,914 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:03,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289103909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:03,961 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/e5f9a250ed3f4160b3d01584d2d7ceb5 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/e5f9a250ed3f4160b3d01584d2d7ceb5 2024-11-22T15:24:03,966 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 661abb5eb8be4eaf4f236a86a23909c7/C of 661abb5eb8be4eaf4f236a86a23909c7 into e5f9a250ed3f4160b3d01584d2d7ceb5(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
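The RegionTooBusyException warnings above are the region blocking writes once its memstore passes the blocking limit (512.0 K here); the client normally retries such calls itself, bounded by hbase.client.retries.number. A minimal sketch of how that limit is derived from configuration, assuming the standard keys and that the test lowered the flush size far below the 128 MB default:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Blocking threshold ~= flush size * block multiplier; a 512 K limit
        // implies roughly a 128 K flush size with the assumed default multiplier of 4.
        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("Writes block above ~" + (flushSize * multiplier) + " bytes per region");
      }
    }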
2024-11-22T15:24:03,966 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:24:03,966 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7., storeName=661abb5eb8be4eaf4f236a86a23909c7/C, priority=13, startTime=1732289043430; duration=0sec 2024-11-22T15:24:03,966 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:03,966 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 661abb5eb8be4eaf4f236a86a23909c7:C 2024-11-22T15:24:04,033 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=483 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/679353d8dd56495081df9f1bcd11dc41 2024-11-22T15:24:04,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/6dcbc168fc784a53ab0489993fd0e32f is 50, key is test_row_0/C:col10/1732289042398/Put/seqid=0 2024-11-22T15:24:04,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742358_1534 (size=9857) 2024-11-22T15:24:04,060 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=483 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/6dcbc168fc784a53ab0489993fd0e32f 2024-11-22T15:24:04,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/328e8a168ed24f02bbb5f2d02f2d397b as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/328e8a168ed24f02bbb5f2d02f2d397b 2024-11-22T15:24:04,073 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/328e8a168ed24f02bbb5f2d02f2d397b, entries=100, sequenceid=483, filesize=22.0 K 2024-11-22T15:24:04,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/679353d8dd56495081df9f1bcd11dc41 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/679353d8dd56495081df9f1bcd11dc41 2024-11-22T15:24:04,080 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/679353d8dd56495081df9f1bcd11dc41, entries=100, sequenceid=483, filesize=9.6 K 2024-11-22T15:24:04,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/6dcbc168fc784a53ab0489993fd0e32f as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/6dcbc168fc784a53ab0489993fd0e32f 2024-11-22T15:24:04,085 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/6dcbc168fc784a53ab0489993fd0e32f, entries=100, sequenceid=483, filesize=9.6 K 2024-11-22T15:24:04,086 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 661abb5eb8be4eaf4f236a86a23909c7 in 584ms, sequenceid=483, compaction requested=false 2024-11-22T15:24:04,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2538): Flush status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:24:04,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
2024-11-22T15:24:04,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=119 2024-11-22T15:24:04,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=119 2024-11-22T15:24:04,088 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=118 2024-11-22T15:24:04,088 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5030 sec 2024-11-22T15:24:04,089 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees in 1.5060 sec 2024-11-22T15:24:04,223 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 661abb5eb8be4eaf4f236a86a23909c7 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-11-22T15:24:04,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on 661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:04,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=A 2024-11-22T15:24:04,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:04,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=B 2024-11-22T15:24:04,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:04,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=C 2024-11-22T15:24:04,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:04,246 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:04,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289104238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:04,247 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:04,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289104238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:04,247 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:04,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289104239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:04,247 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:04,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289104240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:04,252 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122238f793acbd84e84860a758ca39b8fb8_661abb5eb8be4eaf4f236a86a23909c7 is 50, key is test_row_0/A:col10/1732289043584/Put/seqid=0 2024-11-22T15:24:04,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742359_1535 (size=14994) 2024-11-22T15:24:04,352 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:04,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289104347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:04,353 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:04,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289104348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:04,353 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:04,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289104348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:04,353 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:04,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289104348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:04,557 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:04,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289104554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:04,559 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:04,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289104555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:04,560 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:04,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289104555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:04,562 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:04,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289104557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:04,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-22T15:24:04,690 INFO [Thread-1775 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 118 completed 2024-11-22T15:24:04,691 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:24:04,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees 2024-11-22T15:24:04,693 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:24:04,695 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:24:04,695 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:24:04,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-22T15:24:04,709 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:04,712 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122238f793acbd84e84860a758ca39b8fb8_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122238f793acbd84e84860a758ca39b8fb8_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:04,713 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/a1eb6f83aebe46b884bcd177f6102689, store: [table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:24:04,713 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/a1eb6f83aebe46b884bcd177f6102689 is 175, key is test_row_0/A:col10/1732289043584/Put/seqid=0 2024-11-22T15:24:04,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742360_1536 (size=39949) 2024-11-22T15:24:04,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-22T15:24:04,846 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:04,850 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-22T15:24:04,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:24:04,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. as already flushing 2024-11-22T15:24:04,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:24:04,850 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:04,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:04,862 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:04,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289104858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:04,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:24:04,868 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:04,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289104861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:04,868 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:04,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289104863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:04,875 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:04,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289104871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:04,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-22T15:24:05,015 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:05,015 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-22T15:24:05,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:24:05,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. as already flushing 2024-11-22T15:24:05,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:24:05,015 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:24:05,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:05,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:05,121 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=515, memsize=60.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/a1eb6f83aebe46b884bcd177f6102689 2024-11-22T15:24:05,126 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/1db96e21a6ab47fcb55c744693d51eab is 50, key is test_row_0/B:col10/1732289043584/Put/seqid=0 2024-11-22T15:24:05,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742361_1537 (size=12301) 2024-11-22T15:24:05,130 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=515 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/1db96e21a6ab47fcb55c744693d51eab 2024-11-22T15:24:05,135 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/3cb447ff2e594c7abea5759eef42ce03 is 50, key is test_row_0/C:col10/1732289043584/Put/seqid=0 2024-11-22T15:24:05,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742362_1538 (size=12301) 2024-11-22T15:24:05,168 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:05,168 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-22T15:24:05,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:24:05,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. as already flushing 2024-11-22T15:24:05,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
2024-11-22T15:24:05,168 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:05,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:05,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:05,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-22T15:24:05,320 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:05,320 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-22T15:24:05,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:24:05,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. as already flushing 2024-11-22T15:24:05,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:24:05,321 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:05,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:05,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:05,365 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:05,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289105363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:05,369 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:05,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289105369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:05,372 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:05,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289105370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:05,378 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:05,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289105376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:05,472 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:05,472 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-22T15:24:05,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:24:05,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. as already flushing 2024-11-22T15:24:05,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:24:05,473 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:24:05,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:05,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:05,539 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=515 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/3cb447ff2e594c7abea5759eef42ce03 2024-11-22T15:24:05,542 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/a1eb6f83aebe46b884bcd177f6102689 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/a1eb6f83aebe46b884bcd177f6102689 2024-11-22T15:24:05,545 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/a1eb6f83aebe46b884bcd177f6102689, entries=200, sequenceid=515, filesize=39.0 K 2024-11-22T15:24:05,546 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/1db96e21a6ab47fcb55c744693d51eab as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/1db96e21a6ab47fcb55c744693d51eab 2024-11-22T15:24:05,549 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/1db96e21a6ab47fcb55c744693d51eab, entries=150, sequenceid=515, filesize=12.0 K 2024-11-22T15:24:05,549 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/3cb447ff2e594c7abea5759eef42ce03 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/3cb447ff2e594c7abea5759eef42ce03 2024-11-22T15:24:05,551 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/3cb447ff2e594c7abea5759eef42ce03, entries=150, sequenceid=515, filesize=12.0 K 2024-11-22T15:24:05,552 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=26.84 KB/27480 for 661abb5eb8be4eaf4f236a86a23909c7 in 1329ms, sequenceid=515, compaction requested=true 2024-11-22T15:24:05,552 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:24:05,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
661abb5eb8be4eaf4f236a86a23909c7:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:24:05,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:05,552 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:24:05,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 661abb5eb8be4eaf4f236a86a23909c7:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:24:05,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:05,552 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:24:05,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 661abb5eb8be4eaf4f236a86a23909c7:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:24:05,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:24:05,553 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34937 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:24:05,553 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94243 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:24:05,553 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): 661abb5eb8be4eaf4f236a86a23909c7/A is initiating minor compaction (all files) 2024-11-22T15:24:05,553 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 661abb5eb8be4eaf4f236a86a23909c7/A in TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
2024-11-22T15:24:05,553 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/ce4cec3040354d84ac5ceb6ac99ac727, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/328e8a168ed24f02bbb5f2d02f2d397b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/a1eb6f83aebe46b884bcd177f6102689] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp, totalSize=92.0 K 2024-11-22T15:24:05,553 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:24:05,553 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. files: [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/ce4cec3040354d84ac5ceb6ac99ac727, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/328e8a168ed24f02bbb5f2d02f2d397b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/a1eb6f83aebe46b884bcd177f6102689] 2024-11-22T15:24:05,554 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): 661abb5eb8be4eaf4f236a86a23909c7/B is initiating minor compaction (all files) 2024-11-22T15:24:05,554 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 661abb5eb8be4eaf4f236a86a23909c7/B in TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
2024-11-22T15:24:05,554 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/28c02be9dc7a46358ab90bd431613449, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/679353d8dd56495081df9f1bcd11dc41, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/1db96e21a6ab47fcb55c744693d51eab] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp, totalSize=34.1 K 2024-11-22T15:24:05,554 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 28c02be9dc7a46358ab90bd431613449, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=472, earliestPutTs=1732289042256 2024-11-22T15:24:05,554 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting ce4cec3040354d84ac5ceb6ac99ac727, keycount=150, bloomtype=ROW, size=31.0 K, encoding=NONE, compression=NONE, seqNum=472, earliestPutTs=1732289042256 2024-11-22T15:24:05,554 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 679353d8dd56495081df9f1bcd11dc41, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=483, earliestPutTs=1732289042398 2024-11-22T15:24:05,554 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 328e8a168ed24f02bbb5f2d02f2d397b, keycount=100, bloomtype=ROW, size=22.0 K, encoding=NONE, compression=NONE, seqNum=483, earliestPutTs=1732289042398 2024-11-22T15:24:05,554 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 1db96e21a6ab47fcb55c744693d51eab, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=515, earliestPutTs=1732289043584 2024-11-22T15:24:05,554 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting a1eb6f83aebe46b884bcd177f6102689, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=515, earliestPutTs=1732289043584 2024-11-22T15:24:05,558 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:24:05,559 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 661abb5eb8be4eaf4f236a86a23909c7#B#compaction#454 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:05,560 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/b6027279550e40599ddc6a93d6559cb3 is 50, key is test_row_0/B:col10/1732289043584/Put/seqid=0 2024-11-22T15:24:05,560 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112258a325921657424f8abc88e7113f1d0b_661abb5eb8be4eaf4f236a86a23909c7 store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:24:05,562 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112258a325921657424f8abc88e7113f1d0b_661abb5eb8be4eaf4f236a86a23909c7, store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:24:05,562 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112258a325921657424f8abc88e7113f1d0b_661abb5eb8be4eaf4f236a86a23909c7 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:24:05,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742363_1539 (size=12881) 2024-11-22T15:24:05,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742364_1540 (size=4469) 2024-11-22T15:24:05,565 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 661abb5eb8be4eaf4f236a86a23909c7#A#compaction#455 average throughput is 3.49 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:05,565 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/18a161a9fa25430787cebaf87556caa1 is 175, key is test_row_0/A:col10/1732289043584/Put/seqid=0 2024-11-22T15:24:05,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742365_1541 (size=31835) 2024-11-22T15:24:05,624 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:05,624 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-22T15:24:05,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
2024-11-22T15:24:05,625 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2837): Flushing 661abb5eb8be4eaf4f236a86a23909c7 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-22T15:24:05,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=A 2024-11-22T15:24:05,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:05,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=B 2024-11-22T15:24:05,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:05,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=C 2024-11-22T15:24:05,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:05,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411221a1196f812d44e13880012b88f0c63f9_661abb5eb8be4eaf4f236a86a23909c7 is 50, key is test_row_0/A:col10/1732289044238/Put/seqid=0 2024-11-22T15:24:05,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742366_1542 (size=12454) 2024-11-22T15:24:05,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-22T15:24:05,966 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/b6027279550e40599ddc6a93d6559cb3 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/b6027279550e40599ddc6a93d6559cb3 2024-11-22T15:24:05,970 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 661abb5eb8be4eaf4f236a86a23909c7/B of 661abb5eb8be4eaf4f236a86a23909c7 into b6027279550e40599ddc6a93d6559cb3(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:24:05,970 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:24:05,970 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7., storeName=661abb5eb8be4eaf4f236a86a23909c7/B, priority=13, startTime=1732289045552; duration=0sec 2024-11-22T15:24:05,970 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:24:05,970 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 661abb5eb8be4eaf4f236a86a23909c7:B 2024-11-22T15:24:05,970 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:24:05,971 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34903 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:24:05,971 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): 661abb5eb8be4eaf4f236a86a23909c7/C is initiating minor compaction (all files) 2024-11-22T15:24:05,971 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 661abb5eb8be4eaf4f236a86a23909c7/C in TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:24:05,972 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/e5f9a250ed3f4160b3d01584d2d7ceb5, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/6dcbc168fc784a53ab0489993fd0e32f, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/3cb447ff2e594c7abea5759eef42ce03] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp, totalSize=34.1 K 2024-11-22T15:24:05,972 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting e5f9a250ed3f4160b3d01584d2d7ceb5, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=472, earliestPutTs=1732289042256 2024-11-22T15:24:05,972 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 6dcbc168fc784a53ab0489993fd0e32f, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=483, earliestPutTs=1732289042398 2024-11-22T15:24:05,972 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/18a161a9fa25430787cebaf87556caa1 as 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/18a161a9fa25430787cebaf87556caa1 2024-11-22T15:24:05,972 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 3cb447ff2e594c7abea5759eef42ce03, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=515, earliestPutTs=1732289043584 2024-11-22T15:24:05,976 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 661abb5eb8be4eaf4f236a86a23909c7/A of 661abb5eb8be4eaf4f236a86a23909c7 into 18a161a9fa25430787cebaf87556caa1(size=31.1 K), total size for store is 31.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:24:05,976 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:24:05,976 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7., storeName=661abb5eb8be4eaf4f236a86a23909c7/A, priority=13, startTime=1732289045552; duration=0sec 2024-11-22T15:24:05,976 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:05,976 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 661abb5eb8be4eaf4f236a86a23909c7:A 2024-11-22T15:24:05,978 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 661abb5eb8be4eaf4f236a86a23909c7#C#compaction#457 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:05,979 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/ee0d5cf4edcf42c8afdd7ce7621ea022 is 50, key is test_row_0/C:col10/1732289043584/Put/seqid=0 2024-11-22T15:24:05,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742367_1543 (size=12847) 2024-11-22T15:24:06,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:06,035 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411221a1196f812d44e13880012b88f0c63f9_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411221a1196f812d44e13880012b88f0c63f9_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:06,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/6be2458c2d4040379c3d51f4710b0cee, store: [table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:24:06,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/6be2458c2d4040379c3d51f4710b0cee is 175, key is test_row_0/A:col10/1732289044238/Put/seqid=0 2024-11-22T15:24:06,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742368_1544 (size=31255) 2024-11-22T15:24:06,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on 661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:06,377 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
as already flushing 2024-11-22T15:24:06,391 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/ee0d5cf4edcf42c8afdd7ce7621ea022 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/ee0d5cf4edcf42c8afdd7ce7621ea022 2024-11-22T15:24:06,394 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 661abb5eb8be4eaf4f236a86a23909c7/C of 661abb5eb8be4eaf4f236a86a23909c7 into ee0d5cf4edcf42c8afdd7ce7621ea022(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:24:06,394 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:24:06,394 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7., storeName=661abb5eb8be4eaf4f236a86a23909c7/C, priority=13, startTime=1732289045552; duration=0sec 2024-11-22T15:24:06,394 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:06,394 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 661abb5eb8be4eaf4f236a86a23909c7:C 2024-11-22T15:24:06,408 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:06,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289106406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:06,413 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:06,413 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:06,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289106407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:06,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289106408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:06,414 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:06,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289106408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:06,440 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=522, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/6be2458c2d4040379c3d51f4710b0cee 2024-11-22T15:24:06,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/5792540eadbf4af8b686e06304acdd28 is 50, key is test_row_0/B:col10/1732289044238/Put/seqid=0 2024-11-22T15:24:06,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742369_1545 (size=12301) 2024-11-22T15:24:06,512 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:06,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289106509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:06,518 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:06,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289106514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:06,518 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:06,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289106514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:06,518 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:06,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289106515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:06,716 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:06,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289106714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:06,721 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:06,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289106719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:06,721 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:06,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289106719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:06,721 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:06,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289106720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:06,784 DEBUG [Thread-1776 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1f94d721 to 127.0.0.1:52970 2024-11-22T15:24:06,784 DEBUG [Thread-1776 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:24:06,784 DEBUG [Thread-1778 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x319559be to 127.0.0.1:52970 2024-11-22T15:24:06,784 DEBUG [Thread-1778 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:24:06,786 DEBUG [Thread-1780 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c907e21 to 127.0.0.1:52970 2024-11-22T15:24:06,786 DEBUG [Thread-1780 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:24:06,788 DEBUG [Thread-1784 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7819b9e2 to 127.0.0.1:52970 2024-11-22T15:24:06,788 DEBUG [Thread-1784 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:24:06,789 DEBUG [Thread-1782 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x61ec0f48 to 127.0.0.1:52970 2024-11-22T15:24:06,789 DEBUG [Thread-1782 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:24:06,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-22T15:24:06,850 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=522 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/5792540eadbf4af8b686e06304acdd28 2024-11-22T15:24:06,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/cbc432a8b92d4b78b82dfd290e70699a is 50, key is test_row_0/C:col10/1732289044238/Put/seqid=0 
2024-11-22T15:24:06,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742370_1546 (size=12301) 2024-11-22T15:24:07,019 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:07,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47526 deadline: 1732289107018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:07,023 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:07,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47522 deadline: 1732289107023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:07,023 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:07,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47520 deadline: 1732289107023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:07,025 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:07,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47532 deadline: 1732289107025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:07,263 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=522 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/cbc432a8b92d4b78b82dfd290e70699a 2024-11-22T15:24:07,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/6be2458c2d4040379c3d51f4710b0cee as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/6be2458c2d4040379c3d51f4710b0cee 2024-11-22T15:24:07,275 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/6be2458c2d4040379c3d51f4710b0cee, entries=150, sequenceid=522, filesize=30.5 K 2024-11-22T15:24:07,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/5792540eadbf4af8b686e06304acdd28 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/5792540eadbf4af8b686e06304acdd28 2024-11-22T15:24:07,280 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/5792540eadbf4af8b686e06304acdd28, entries=150, sequenceid=522, filesize=12.0 K 2024-11-22T15:24:07,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/cbc432a8b92d4b78b82dfd290e70699a as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/cbc432a8b92d4b78b82dfd290e70699a 2024-11-22T15:24:07,284 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/cbc432a8b92d4b78b82dfd290e70699a, entries=150, sequenceid=522, filesize=12.0 K 2024-11-22T15:24:07,284 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=174.43 KB/178620 for 661abb5eb8be4eaf4f236a86a23909c7 in 1659ms, sequenceid=522, compaction requested=false 2024-11-22T15:24:07,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2538): Flush status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:24:07,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
2024-11-22T15:24:07,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=121
2024-11-22T15:24:07,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=121
2024-11-22T15:24:07,286 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120
2024-11-22T15:24:07,286 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5900 sec
2024-11-22T15:24:07,287 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees in 2.5960 sec
2024-11-22T15:24:07,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on 661abb5eb8be4eaf4f236a86a23909c7
2024-11-22T15:24:07,525 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 661abb5eb8be4eaf4f236a86a23909c7 3/3 column families, dataSize=181.14 KB heapSize=475.36 KB
2024-11-22T15:24:07,525 DEBUG [Thread-1765 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0bf5e2f0 to 127.0.0.1:52970
2024-11-22T15:24:07,525 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=A
2024-11-22T15:24:07,525 DEBUG [Thread-1765 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-22T15:24:07,525 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-22T15:24:07,526 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=B
2024-11-22T15:24:07,526 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-22T15:24:07,526 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=C
2024-11-22T15:24:07,526 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-22T15:24:07,531 DEBUG [Thread-1767 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x75b14fbd to 127.0.0.1:52970
2024-11-22T15:24:07,531 DEBUG [Thread-1773 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2c54a0d3 to 127.0.0.1:52970
2024-11-22T15:24:07,531 DEBUG [Thread-1769 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x62f74604 to 127.0.0.1:52970
2024-11-22T15:24:07,531 DEBUG [Thread-1767 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-22T15:24:07,531 DEBUG [Thread-1769 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-22T15:24:07,531 DEBUG [Thread-1773 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-22T15:24:07,535 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411221ad3af9e2fae4e93899ef18f47737e3e_661abb5eb8be4eaf4f236a86a23909c7 is 50, key is test_row_0/A:col10/1732289047522/Put/seqid=0
2024-11-22T15:24:07,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK*
addStoredBlock: 127.0.0.1:42059 is added to blk_1073742371_1547 (size=12454) 2024-11-22T15:24:07,940 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:07,944 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411221ad3af9e2fae4e93899ef18f47737e3e_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411221ad3af9e2fae4e93899ef18f47737e3e_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:07,945 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/9713b534acea4f43835d8389c0a7f3e6, store: [table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:24:07,945 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/9713b534acea4f43835d8389c0a7f3e6 is 175, key is test_row_0/A:col10/1732289047522/Put/seqid=0 2024-11-22T15:24:07,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742372_1548 (size=31255) 2024-11-22T15:24:08,350 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=555, memsize=60.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/9713b534acea4f43835d8389c0a7f3e6 2024-11-22T15:24:08,364 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/35491e9d346145eb8199636e1857a15d is 50, key is test_row_0/B:col10/1732289047522/Put/seqid=0 2024-11-22T15:24:08,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742373_1549 (size=12301) 2024-11-22T15:24:08,770 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=555 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/35491e9d346145eb8199636e1857a15d 2024-11-22T15:24:08,783 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/6d8043ca993c490abad9b1789978ebae is 50, key is test_row_0/C:col10/1732289047522/Put/seqid=0 2024-11-22T15:24:08,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742374_1550 
(size=12301) 2024-11-22T15:24:08,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-22T15:24:08,801 INFO [Thread-1775 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 120 completed 2024-11-22T15:24:09,190 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=555 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/6d8043ca993c490abad9b1789978ebae 2024-11-22T15:24:09,200 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/9713b534acea4f43835d8389c0a7f3e6 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/9713b534acea4f43835d8389c0a7f3e6 2024-11-22T15:24:09,204 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/9713b534acea4f43835d8389c0a7f3e6, entries=150, sequenceid=555, filesize=30.5 K 2024-11-22T15:24:09,205 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/35491e9d346145eb8199636e1857a15d as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/35491e9d346145eb8199636e1857a15d 2024-11-22T15:24:09,209 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/35491e9d346145eb8199636e1857a15d, entries=150, sequenceid=555, filesize=12.0 K 2024-11-22T15:24:09,210 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/6d8043ca993c490abad9b1789978ebae as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/6d8043ca993c490abad9b1789978ebae 2024-11-22T15:24:09,214 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/6d8043ca993c490abad9b1789978ebae, entries=150, sequenceid=555, filesize=12.0 K 2024-11-22T15:24:09,214 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=20.13 KB/20610 for 661abb5eb8be4eaf4f236a86a23909c7 in 1689ms, sequenceid=555, compaction requested=true 2024-11-22T15:24:09,214 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:24:09,215 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 661abb5eb8be4eaf4f236a86a23909c7:A, priority=-2147483648, current under compaction store size is 1
2024-11-22T15:24:09,215 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-22T15:24:09,215 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 661abb5eb8be4eaf4f236a86a23909c7:B, priority=-2147483648, current under compaction store size is 2
2024-11-22T15:24:09,215 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-22T15:24:09,215 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-22T15:24:09,215 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 661abb5eb8be4eaf4f236a86a23909c7:C, priority=-2147483648, current under compaction store size is 3
2024-11-22T15:24:09,215 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-22T15:24:09,215 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-22T15:24:09,216 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94345 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-22T15:24:09,216 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37483 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-22T15:24:09,216 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): 661abb5eb8be4eaf4f236a86a23909c7/A is initiating minor compaction (all files)
2024-11-22T15:24:09,216 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): 661abb5eb8be4eaf4f236a86a23909c7/B is initiating minor compaction (all files)
2024-11-22T15:24:09,216 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 661abb5eb8be4eaf4f236a86a23909c7/B in TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.
2024-11-22T15:24:09,216 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 661abb5eb8be4eaf4f236a86a23909c7/A in TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.
2024-11-22T15:24:09,216 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/b6027279550e40599ddc6a93d6559cb3, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/5792540eadbf4af8b686e06304acdd28, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/35491e9d346145eb8199636e1857a15d] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp, totalSize=36.6 K 2024-11-22T15:24:09,216 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/18a161a9fa25430787cebaf87556caa1, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/6be2458c2d4040379c3d51f4710b0cee, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/9713b534acea4f43835d8389c0a7f3e6] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp, totalSize=92.1 K 2024-11-22T15:24:09,216 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:24:09,216 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
files: [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/18a161a9fa25430787cebaf87556caa1, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/6be2458c2d4040379c3d51f4710b0cee, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/9713b534acea4f43835d8389c0a7f3e6] 2024-11-22T15:24:09,216 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting b6027279550e40599ddc6a93d6559cb3, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=515, earliestPutTs=1732289043584 2024-11-22T15:24:09,217 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 18a161a9fa25430787cebaf87556caa1, keycount=150, bloomtype=ROW, size=31.1 K, encoding=NONE, compression=NONE, seqNum=515, earliestPutTs=1732289043584 2024-11-22T15:24:09,217 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 5792540eadbf4af8b686e06304acdd28, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=522, earliestPutTs=1732289044236 2024-11-22T15:24:09,217 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6be2458c2d4040379c3d51f4710b0cee, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=522, earliestPutTs=1732289044236 2024-11-22T15:24:09,217 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 35491e9d346145eb8199636e1857a15d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=555, earliestPutTs=1732289046406 2024-11-22T15:24:09,217 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9713b534acea4f43835d8389c0a7f3e6, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=555, earliestPutTs=1732289046406 2024-11-22T15:24:09,228 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:24:09,229 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 661abb5eb8be4eaf4f236a86a23909c7#B#compaction#463 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:09,229 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411221e2f00cc4b544f91851e46d308274067_661abb5eb8be4eaf4f236a86a23909c7 store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:24:09,229 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/a10101c0d2264664b98139bc66f290fc is 50, key is test_row_0/B:col10/1732289047522/Put/seqid=0 2024-11-22T15:24:09,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742375_1551 (size=12983) 2024-11-22T15:24:09,259 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411221e2f00cc4b544f91851e46d308274067_661abb5eb8be4eaf4f236a86a23909c7, store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:24:09,259 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411221e2f00cc4b544f91851e46d308274067_661abb5eb8be4eaf4f236a86a23909c7 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:24:09,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742376_1552 (size=4469) 2024-11-22T15:24:09,640 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/a10101c0d2264664b98139bc66f290fc as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/a10101c0d2264664b98139bc66f290fc 2024-11-22T15:24:09,646 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 661abb5eb8be4eaf4f236a86a23909c7/B of 661abb5eb8be4eaf4f236a86a23909c7 into a10101c0d2264664b98139bc66f290fc(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:24:09,646 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:24:09,646 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7., storeName=661abb5eb8be4eaf4f236a86a23909c7/B, priority=13, startTime=1732289049215; duration=0sec 2024-11-22T15:24:09,647 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:24:09,647 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 661abb5eb8be4eaf4f236a86a23909c7:B 2024-11-22T15:24:09,647 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:24:09,648 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37449 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:24:09,648 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): 661abb5eb8be4eaf4f236a86a23909c7/C is initiating minor compaction (all files) 2024-11-22T15:24:09,648 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 661abb5eb8be4eaf4f236a86a23909c7/C in TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:24:09,648 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/ee0d5cf4edcf42c8afdd7ce7621ea022, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/cbc432a8b92d4b78b82dfd290e70699a, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/6d8043ca993c490abad9b1789978ebae] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp, totalSize=36.6 K 2024-11-22T15:24:09,649 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting ee0d5cf4edcf42c8afdd7ce7621ea022, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=515, earliestPutTs=1732289043584 2024-11-22T15:24:09,649 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting cbc432a8b92d4b78b82dfd290e70699a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=522, earliestPutTs=1732289044236 2024-11-22T15:24:09,650 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 6d8043ca993c490abad9b1789978ebae, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=555, earliestPutTs=1732289046406 2024-11-22T15:24:09,660 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
661abb5eb8be4eaf4f236a86a23909c7#C#compaction#465 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:09,661 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/5ee03178adf745519fc866f94afb495f is 50, key is test_row_0/C:col10/1732289047522/Put/seqid=0 2024-11-22T15:24:09,663 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 661abb5eb8be4eaf4f236a86a23909c7#A#compaction#464 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:09,664 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/964cb79541e04d40b90360f08149ae3d is 175, key is test_row_0/A:col10/1732289047522/Put/seqid=0 2024-11-22T15:24:09,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742377_1553 (size=12949) 2024-11-22T15:24:09,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742378_1554 (size=31937) 2024-11-22T15:24:10,076 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/5ee03178adf745519fc866f94afb495f as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/5ee03178adf745519fc866f94afb495f 2024-11-22T15:24:10,077 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/964cb79541e04d40b90360f08149ae3d as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/964cb79541e04d40b90360f08149ae3d 2024-11-22T15:24:10,081 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 661abb5eb8be4eaf4f236a86a23909c7/A of 661abb5eb8be4eaf4f236a86a23909c7 into 964cb79541e04d40b90360f08149ae3d(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:24:10,081 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 661abb5eb8be4eaf4f236a86a23909c7/C of 661abb5eb8be4eaf4f236a86a23909c7 into 5ee03178adf745519fc866f94afb495f(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:24:10,081 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:24:10,081 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:24:10,081 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7., storeName=661abb5eb8be4eaf4f236a86a23909c7/C, priority=13, startTime=1732289049215; duration=0sec 2024-11-22T15:24:10,081 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7., storeName=661abb5eb8be4eaf4f236a86a23909c7/A, priority=13, startTime=1732289049215; duration=0sec 2024-11-22T15:24:10,081 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:10,081 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 661abb5eb8be4eaf4f236a86a23909c7:A 2024-11-22T15:24:10,081 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:10,081 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 661abb5eb8be4eaf4f236a86a23909c7:C 2024-11-22T15:24:10,714 DEBUG [Thread-1771 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x49e13594 to 127.0.0.1:52970 2024-11-22T15:24:10,714 DEBUG [Thread-1771 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:24:10,715 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-22T15:24:10,715 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 118 2024-11-22T15:24:10,715 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 78 2024-11-22T15:24:10,715 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 93 2024-11-22T15:24:10,715 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 63 2024-11-22T15:24:10,715 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 87 2024-11-22T15:24:10,715 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-22T15:24:10,715 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-22T15:24:10,715 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1565 2024-11-22T15:24:10,715 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4695 rows 2024-11-22T15:24:10,715 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1559 2024-11-22T15:24:10,715 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4677 rows 2024-11-22T15:24:10,715 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1551 2024-11-22T15:24:10,715 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4653 rows 2024-11-22T15:24:10,716 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1562 2024-11-22T15:24:10,716 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4686 rows 2024-11-22T15:24:10,716 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1550 2024-11-22T15:24:10,716 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4650 rows 2024-11-22T15:24:10,716 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-22T15:24:10,716 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2f7f772a to 127.0.0.1:52970 2024-11-22T15:24:10,716 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:24:10,719 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-22T15:24:10,719 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-22T15:24:10,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=122, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-22T15:24:10,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-22T15:24:10,724 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732289050723"}]},"ts":"1732289050723"} 2024-11-22T15:24:10,725 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-22T15:24:10,783 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-22T15:24:10,784 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-22T15:24:10,785 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=124, ppid=123, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=661abb5eb8be4eaf4f236a86a23909c7, UNASSIGN}] 2024-11-22T15:24:10,786 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=124, ppid=123, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=661abb5eb8be4eaf4f236a86a23909c7, UNASSIGN 2024-11-22T15:24:10,786 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=124 updating hbase:meta row=661abb5eb8be4eaf4f236a86a23909c7, regionState=CLOSING, regionLocation=77927f992d0b,36033,1732288915809 2024-11-22T15:24:10,787 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-22T15:24:10,787 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE; CloseRegionProcedure 661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809}] 2024-11-22T15:24:10,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-22T15:24:10,938 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:10,938 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] handler.UnassignRegionHandler(124): Close 661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:10,938 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-22T15:24:10,938 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1681): Closing 661abb5eb8be4eaf4f236a86a23909c7, disabling compactions & flushes 2024-11-22T15:24:10,938 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:24:10,939 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:24:10,939 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. after waiting 0 ms 2024-11-22T15:24:10,939 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 
2024-11-22T15:24:10,939 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(2837): Flushing 661abb5eb8be4eaf4f236a86a23909c7 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-22T15:24:10,939 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=A 2024-11-22T15:24:10,939 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:10,939 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=B 2024-11-22T15:24:10,939 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:10,939 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 661abb5eb8be4eaf4f236a86a23909c7, store=C 2024-11-22T15:24:10,939 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:10,944 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112248929b8ca76c4a7c9124980b942488dc_661abb5eb8be4eaf4f236a86a23909c7 is 50, key is test_row_1/A:col10/1732289050711/Put/seqid=0 2024-11-22T15:24:10,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742379_1555 (size=9914) 2024-11-22T15:24:11,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-22T15:24:11,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-22T15:24:11,350 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:11,352 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112248929b8ca76c4a7c9124980b942488dc_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112248929b8ca76c4a7c9124980b942488dc_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:11,353 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] mob.DefaultMobStoreFlusher(263): Flush store 
file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/fa3c3185310b4acdaf1bf725d92d2d92, store: [table=TestAcidGuarantees family=A region=661abb5eb8be4eaf4f236a86a23909c7] 2024-11-22T15:24:11,353 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/fa3c3185310b4acdaf1bf725d92d2d92 is 175, key is test_row_1/A:col10/1732289050711/Put/seqid=0 2024-11-22T15:24:11,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742380_1556 (size=22561) 2024-11-22T15:24:11,757 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=565, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/fa3c3185310b4acdaf1bf725d92d2d92 2024-11-22T15:24:11,762 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/b292c0d8cf6f4f5784fff27fd5ed850f is 50, key is test_row_1/B:col10/1732289050711/Put/seqid=0 2024-11-22T15:24:11,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742381_1557 (size=9857) 2024-11-22T15:24:11,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-22T15:24:12,166 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=565 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/b292c0d8cf6f4f5784fff27fd5ed850f 2024-11-22T15:24:12,171 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/6b60f644b7c94f4abd829237dbe0675b is 50, key is test_row_1/C:col10/1732289050711/Put/seqid=0 2024-11-22T15:24:12,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742382_1558 (size=9857) 2024-11-22T15:24:12,574 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=565 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/6b60f644b7c94f4abd829237dbe0675b 2024-11-22T15:24:12,577 DEBUG 
[RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/A/fa3c3185310b4acdaf1bf725d92d2d92 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/fa3c3185310b4acdaf1bf725d92d2d92 2024-11-22T15:24:12,581 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/fa3c3185310b4acdaf1bf725d92d2d92, entries=100, sequenceid=565, filesize=22.0 K 2024-11-22T15:24:12,581 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/B/b292c0d8cf6f4f5784fff27fd5ed850f as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/b292c0d8cf6f4f5784fff27fd5ed850f 2024-11-22T15:24:12,584 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/b292c0d8cf6f4f5784fff27fd5ed850f, entries=100, sequenceid=565, filesize=9.6 K 2024-11-22T15:24:12,585 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/.tmp/C/6b60f644b7c94f4abd829237dbe0675b as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/6b60f644b7c94f4abd829237dbe0675b 2024-11-22T15:24:12,588 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/6b60f644b7c94f4abd829237dbe0675b, entries=100, sequenceid=565, filesize=9.6 K 2024-11-22T15:24:12,588 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 661abb5eb8be4eaf4f236a86a23909c7 in 1649ms, sequenceid=565, compaction requested=false 2024-11-22T15:24:12,589 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/96c45a8957b24d91918c60bff6bef3c1, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/09c361749e40448fb10b071d8a7f63e3, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/aa003385f26c43d18a4aa71e70e81d0b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/4faeb4f24b9c462ba2c028f3dd7af12a, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/6cd9aed360c9469c96b6696af13a7b6f, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/a23f49d469964f188979661d80f613a1, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/3cfaa64de02842d9bf32160faabf71b4, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/233de41cb7324db2a57991b8a2d7cf80, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/2f53e62b138d431597122341bba435d2, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/1e7cd6fff05e483bbef72706cb4e279d, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/458bbea260be4b58acafec26ffe2a1f8, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/97c4733754f04aae81dab27c3bac1362, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/6ee6a9f89af34077a871c52748d25129, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/0269cdd28ade4d38a9517b25c7ee46a8, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/ce4cec3040354d84ac5ceb6ac99ac727, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/328e8a168ed24f02bbb5f2d02f2d397b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/a1eb6f83aebe46b884bcd177f6102689, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/18a161a9fa25430787cebaf87556caa1, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/6be2458c2d4040379c3d51f4710b0cee, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/9713b534acea4f43835d8389c0a7f3e6] to archive 2024-11-22T15:24:12,589 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-22T15:24:12,591 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/96c45a8957b24d91918c60bff6bef3c1 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/96c45a8957b24d91918c60bff6bef3c1 2024-11-22T15:24:12,592 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/09c361749e40448fb10b071d8a7f63e3 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/09c361749e40448fb10b071d8a7f63e3 2024-11-22T15:24:12,592 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/aa003385f26c43d18a4aa71e70e81d0b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/aa003385f26c43d18a4aa71e70e81d0b 2024-11-22T15:24:12,593 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/4faeb4f24b9c462ba2c028f3dd7af12a to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/4faeb4f24b9c462ba2c028f3dd7af12a 2024-11-22T15:24:12,594 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/6cd9aed360c9469c96b6696af13a7b6f to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/6cd9aed360c9469c96b6696af13a7b6f 2024-11-22T15:24:12,595 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/a23f49d469964f188979661d80f613a1 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/a23f49d469964f188979661d80f613a1 2024-11-22T15:24:12,596 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/3cfaa64de02842d9bf32160faabf71b4 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/3cfaa64de02842d9bf32160faabf71b4 2024-11-22T15:24:12,597 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/233de41cb7324db2a57991b8a2d7cf80 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/233de41cb7324db2a57991b8a2d7cf80 2024-11-22T15:24:12,598 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/2f53e62b138d431597122341bba435d2 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/2f53e62b138d431597122341bba435d2 2024-11-22T15:24:12,599 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/1e7cd6fff05e483bbef72706cb4e279d to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/1e7cd6fff05e483bbef72706cb4e279d 2024-11-22T15:24:12,599 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/458bbea260be4b58acafec26ffe2a1f8 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/458bbea260be4b58acafec26ffe2a1f8 2024-11-22T15:24:12,600 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/97c4733754f04aae81dab27c3bac1362 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/97c4733754f04aae81dab27c3bac1362 2024-11-22T15:24:12,601 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/6ee6a9f89af34077a871c52748d25129 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/6ee6a9f89af34077a871c52748d25129 2024-11-22T15:24:12,602 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/0269cdd28ade4d38a9517b25c7ee46a8 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/0269cdd28ade4d38a9517b25c7ee46a8 2024-11-22T15:24:12,603 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/ce4cec3040354d84ac5ceb6ac99ac727 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/ce4cec3040354d84ac5ceb6ac99ac727 2024-11-22T15:24:12,604 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/328e8a168ed24f02bbb5f2d02f2d397b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/328e8a168ed24f02bbb5f2d02f2d397b 2024-11-22T15:24:12,604 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/a1eb6f83aebe46b884bcd177f6102689 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/a1eb6f83aebe46b884bcd177f6102689 2024-11-22T15:24:12,605 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/18a161a9fa25430787cebaf87556caa1 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/18a161a9fa25430787cebaf87556caa1 2024-11-22T15:24:12,606 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/6be2458c2d4040379c3d51f4710b0cee to 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/6be2458c2d4040379c3d51f4710b0cee 2024-11-22T15:24:12,607 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/9713b534acea4f43835d8389c0a7f3e6 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/9713b534acea4f43835d8389c0a7f3e6 2024-11-22T15:24:12,608 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/c900f769635a48f885c69c33fe147980, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/58aa5055ca7d4091a05b7ba278cf7dc9, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/95df3d3c3e1f4520bf3f489f0383f04b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/a23be3e99d5f45178bfdef1868f340fb, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/313a0ca60f8e4195872a4032091b4a56, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/61dbbab0d0c44878851f9d5cc2ad619e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/f43054983f4249a8b4e4f7d0aaca902e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/3f2ef7cf028a435b962a643c20f23124, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/2940e11d10434961bf8d5ca15a16d188, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/77c1fe67c3a242cb885f52f8ebe47a4b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/db0bdc6609ec4d41bae2716729dfde3c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/5ca96a0d37444d0eb79e1a6e1a886b63, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/9219b88c568e43f9989da3e397528add, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/28c02be9dc7a46358ab90bd431613449, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/0c9096b2eff4484d8c88187ac9fce95c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/679353d8dd56495081df9f1bcd11dc41, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/b6027279550e40599ddc6a93d6559cb3, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/1db96e21a6ab47fcb55c744693d51eab, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/5792540eadbf4af8b686e06304acdd28, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/35491e9d346145eb8199636e1857a15d] to archive 2024-11-22T15:24:12,609 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-22T15:24:12,610 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/c900f769635a48f885c69c33fe147980 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/c900f769635a48f885c69c33fe147980 2024-11-22T15:24:12,611 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/58aa5055ca7d4091a05b7ba278cf7dc9 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/58aa5055ca7d4091a05b7ba278cf7dc9 2024-11-22T15:24:12,612 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/95df3d3c3e1f4520bf3f489f0383f04b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/95df3d3c3e1f4520bf3f489f0383f04b 2024-11-22T15:24:12,613 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/a23be3e99d5f45178bfdef1868f340fb to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/a23be3e99d5f45178bfdef1868f340fb 2024-11-22T15:24:12,614 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/313a0ca60f8e4195872a4032091b4a56 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/313a0ca60f8e4195872a4032091b4a56 2024-11-22T15:24:12,615 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/61dbbab0d0c44878851f9d5cc2ad619e to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/61dbbab0d0c44878851f9d5cc2ad619e 2024-11-22T15:24:12,616 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/f43054983f4249a8b4e4f7d0aaca902e to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/f43054983f4249a8b4e4f7d0aaca902e 2024-11-22T15:24:12,617 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/3f2ef7cf028a435b962a643c20f23124 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/3f2ef7cf028a435b962a643c20f23124 2024-11-22T15:24:12,618 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/2940e11d10434961bf8d5ca15a16d188 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/2940e11d10434961bf8d5ca15a16d188 2024-11-22T15:24:12,618 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/77c1fe67c3a242cb885f52f8ebe47a4b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/77c1fe67c3a242cb885f52f8ebe47a4b 2024-11-22T15:24:12,619 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/db0bdc6609ec4d41bae2716729dfde3c to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/db0bdc6609ec4d41bae2716729dfde3c 2024-11-22T15:24:12,620 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/5ca96a0d37444d0eb79e1a6e1a886b63 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/5ca96a0d37444d0eb79e1a6e1a886b63 2024-11-22T15:24:12,621 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/9219b88c568e43f9989da3e397528add to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/9219b88c568e43f9989da3e397528add 2024-11-22T15:24:12,622 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/28c02be9dc7a46358ab90bd431613449 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/28c02be9dc7a46358ab90bd431613449 2024-11-22T15:24:12,623 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/0c9096b2eff4484d8c88187ac9fce95c to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/0c9096b2eff4484d8c88187ac9fce95c 2024-11-22T15:24:12,624 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/679353d8dd56495081df9f1bcd11dc41 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/679353d8dd56495081df9f1bcd11dc41 2024-11-22T15:24:12,625 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/b6027279550e40599ddc6a93d6559cb3 to 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/b6027279550e40599ddc6a93d6559cb3 2024-11-22T15:24:12,626 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/1db96e21a6ab47fcb55c744693d51eab to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/1db96e21a6ab47fcb55c744693d51eab 2024-11-22T15:24:12,626 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/5792540eadbf4af8b686e06304acdd28 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/5792540eadbf4af8b686e06304acdd28 2024-11-22T15:24:12,627 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/35491e9d346145eb8199636e1857a15d to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/35491e9d346145eb8199636e1857a15d 2024-11-22T15:24:12,628 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/cb9cd30f30094fc3b45985e5bd64f102, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/90403198d8a44db0b3c0f3b33bee39a8, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/02126ed47d304dac8e564d19f7518bf2, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/ae99c736547f4ff2a03bdc6ab6314c5f, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/8e5845ca371a43478dc0ac2db15a7644, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/a2a81017e3464625afdd5453eff4b5ba, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/8ff986aef920428baf083e68b1fc62cd, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/04a966d642074fdd9c427450cbc12d80, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/ff090d3dc6a4433e84727788ee6b061d, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/21965030d1e84588a21d3b44061ebb76, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/e2634ac29a384073ae2352d4a27137d2, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/7fdd253d3fe0461a8cdf2ea381d5b4b1, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/e5f9a250ed3f4160b3d01584d2d7ceb5, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/ef33ae7a8e644604b6177e981580a594, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/6dcbc168fc784a53ab0489993fd0e32f, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/ee0d5cf4edcf42c8afdd7ce7621ea022, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/3cb447ff2e594c7abea5759eef42ce03, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/cbc432a8b92d4b78b82dfd290e70699a, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/6d8043ca993c490abad9b1789978ebae] to archive 2024-11-22T15:24:12,629 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-22T15:24:12,630 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/cb9cd30f30094fc3b45985e5bd64f102 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/cb9cd30f30094fc3b45985e5bd64f102 2024-11-22T15:24:12,631 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/90403198d8a44db0b3c0f3b33bee39a8 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/90403198d8a44db0b3c0f3b33bee39a8 2024-11-22T15:24:12,632 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/02126ed47d304dac8e564d19f7518bf2 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/02126ed47d304dac8e564d19f7518bf2 2024-11-22T15:24:12,633 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/ae99c736547f4ff2a03bdc6ab6314c5f to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/ae99c736547f4ff2a03bdc6ab6314c5f 2024-11-22T15:24:12,634 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/8e5845ca371a43478dc0ac2db15a7644 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/8e5845ca371a43478dc0ac2db15a7644 2024-11-22T15:24:12,634 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/a2a81017e3464625afdd5453eff4b5ba to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/a2a81017e3464625afdd5453eff4b5ba 2024-11-22T15:24:12,635 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/8ff986aef920428baf083e68b1fc62cd to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/8ff986aef920428baf083e68b1fc62cd 2024-11-22T15:24:12,636 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/04a966d642074fdd9c427450cbc12d80 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/04a966d642074fdd9c427450cbc12d80 2024-11-22T15:24:12,637 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/ff090d3dc6a4433e84727788ee6b061d to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/ff090d3dc6a4433e84727788ee6b061d 2024-11-22T15:24:12,637 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/21965030d1e84588a21d3b44061ebb76 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/21965030d1e84588a21d3b44061ebb76 2024-11-22T15:24:12,638 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/e2634ac29a384073ae2352d4a27137d2 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/e2634ac29a384073ae2352d4a27137d2 2024-11-22T15:24:12,639 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/7fdd253d3fe0461a8cdf2ea381d5b4b1 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/7fdd253d3fe0461a8cdf2ea381d5b4b1 2024-11-22T15:24:12,640 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/e5f9a250ed3f4160b3d01584d2d7ceb5 to 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/e5f9a250ed3f4160b3d01584d2d7ceb5 2024-11-22T15:24:12,641 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/ef33ae7a8e644604b6177e981580a594 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/ef33ae7a8e644604b6177e981580a594 2024-11-22T15:24:12,641 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/6dcbc168fc784a53ab0489993fd0e32f to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/6dcbc168fc784a53ab0489993fd0e32f 2024-11-22T15:24:12,642 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/ee0d5cf4edcf42c8afdd7ce7621ea022 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/ee0d5cf4edcf42c8afdd7ce7621ea022 2024-11-22T15:24:12,643 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/3cb447ff2e594c7abea5759eef42ce03 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/3cb447ff2e594c7abea5759eef42ce03 2024-11-22T15:24:12,644 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/cbc432a8b92d4b78b82dfd290e70699a to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/cbc432a8b92d4b78b82dfd290e70699a 2024-11-22T15:24:12,645 DEBUG [StoreCloser-TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/6d8043ca993c490abad9b1789978ebae to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/6d8043ca993c490abad9b1789978ebae 2024-11-22T15:24:12,648 DEBUG 
[RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/recovered.edits/568.seqid, newMaxSeqId=568, maxSeqId=4 2024-11-22T15:24:12,648 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7. 2024-11-22T15:24:12,648 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1635): Region close journal for 661abb5eb8be4eaf4f236a86a23909c7: 2024-11-22T15:24:12,649 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] handler.UnassignRegionHandler(170): Closed 661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:12,650 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=124 updating hbase:meta row=661abb5eb8be4eaf4f236a86a23909c7, regionState=CLOSED 2024-11-22T15:24:12,651 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-11-22T15:24:12,651 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; CloseRegionProcedure 661abb5eb8be4eaf4f236a86a23909c7, server=77927f992d0b,36033,1732288915809 in 1.8630 sec 2024-11-22T15:24:12,652 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=124, resume processing ppid=123 2024-11-22T15:24:12,652 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, ppid=123, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=661abb5eb8be4eaf4f236a86a23909c7, UNASSIGN in 1.8660 sec 2024-11-22T15:24:12,654 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-11-22T15:24:12,654 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8690 sec 2024-11-22T15:24:12,655 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732289052655"}]},"ts":"1732289052655"} 2024-11-22T15:24:12,656 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-22T15:24:12,692 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-22T15:24:12,693 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.9730 sec 2024-11-22T15:24:12,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-22T15:24:12,826 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 122 completed 2024-11-22T15:24:12,827 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-22T15:24:12,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=126, 
state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T15:24:12,828 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=126, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T15:24:12,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-22T15:24:12,829 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=126, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T15:24:12,831 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:12,832 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A, FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B, FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C, FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/recovered.edits] 2024-11-22T15:24:12,834 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/964cb79541e04d40b90360f08149ae3d to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/964cb79541e04d40b90360f08149ae3d 2024-11-22T15:24:12,835 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/fa3c3185310b4acdaf1bf725d92d2d92 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/A/fa3c3185310b4acdaf1bf725d92d2d92 2024-11-22T15:24:12,836 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/a10101c0d2264664b98139bc66f290fc to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/a10101c0d2264664b98139bc66f290fc 2024-11-22T15:24:12,837 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/b292c0d8cf6f4f5784fff27fd5ed850f to 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/B/b292c0d8cf6f4f5784fff27fd5ed850f 2024-11-22T15:24:12,839 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/5ee03178adf745519fc866f94afb495f to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/5ee03178adf745519fc866f94afb495f 2024-11-22T15:24:12,840 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/6b60f644b7c94f4abd829237dbe0675b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/C/6b60f644b7c94f4abd829237dbe0675b 2024-11-22T15:24:12,842 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/recovered.edits/568.seqid to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7/recovered.edits/568.seqid 2024-11-22T15:24:12,843 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:12,843 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-22T15:24:12,844 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-22T15:24:12,844 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-22T15:24:12,847 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122041e8dc911e2494c93b765caabb85a2d_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122041e8dc911e2494c93b765caabb85a2d_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:12,848 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122117fde7e417c4711a4036f103539b2f3_661abb5eb8be4eaf4f236a86a23909c7 to 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122117fde7e417c4711a4036f103539b2f3_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:12,849 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411221669961546aa482c9a49d7fab3d080b9_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411221669961546aa482c9a49d7fab3d080b9_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:12,850 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411221917181e87db453d81dfd4cabb217d33_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411221917181e87db453d81dfd4cabb217d33_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:12,850 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411221a1196f812d44e13880012b88f0c63f9_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411221a1196f812d44e13880012b88f0c63f9_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:12,851 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411221ad3af9e2fae4e93899ef18f47737e3e_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411221ad3af9e2fae4e93899ef18f47737e3e_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:12,852 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122238f793acbd84e84860a758ca39b8fb8_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122238f793acbd84e84860a758ca39b8fb8_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:12,853 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122259797381eb64e6789ade616f226e251_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122259797381eb64e6789ade616f226e251_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:12,854 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122327b765993ca42aeacee5c53d8dd98c9_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122327b765993ca42aeacee5c53d8dd98c9_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:12,855 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122448eb70eeb8a4629899d9b95a16d5309_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122448eb70eeb8a4629899d9b95a16d5309_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:12,856 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112248929b8ca76c4a7c9124980b942488dc_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112248929b8ca76c4a7c9124980b942488dc_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:12,858 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411225ec2638df8824ab0a1015796b5508cf8_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411225ec2638df8824ab0a1015796b5508cf8_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:12,859 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122734ac57d13c147f9b14c4639d8e5992d_661abb5eb8be4eaf4f236a86a23909c7 to 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122734ac57d13c147f9b14c4639d8e5992d_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:12,860 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411228214ff21ee8d4de88ed65be4f835777b_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411228214ff21ee8d4de88ed65be4f835777b_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:12,861 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122870c464f34e940108e263a1085156c7d_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122870c464f34e940108e263a1085156c7d_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:12,867 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122941d026bfcb74d599b687fa8e35de6c1_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122941d026bfcb74d599b687fa8e35de6c1_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:12,868 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411229dbb61bc9cfe48fba4ea9451be2731c9_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411229dbb61bc9cfe48fba4ea9451be2731c9_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:12,869 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122a40f4a4c442848299de16872def09234_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122a40f4a4c442848299de16872def09234_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:12,871 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122a8a67dd790ca4f8d804c14eabfef0921_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122a8a67dd790ca4f8d804c14eabfef0921_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:12,872 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122b0501c3a2a50431ba4e9b1ac30dca7d0_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122b0501c3a2a50431ba4e9b1ac30dca7d0_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:12,874 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122b7139ff281804174a1d20f425a4c37de_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122b7139ff281804174a1d20f425a4c37de_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:12,879 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122cdaa534810b34242b870f5db8eb8ce28_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122cdaa534810b34242b870f5db8eb8ce28_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:12,881 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122d1390f31d68043d98c34bcf76d7a018f_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122d1390f31d68043d98c34bcf76d7a018f_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:12,882 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122d7a390d47e6b41678e783dba9875f82f_661abb5eb8be4eaf4f236a86a23909c7 to 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122d7a390d47e6b41678e783dba9875f82f_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:12,883 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122e8af8674aa5a48e3ac735b0740231ef2_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122e8af8674aa5a48e3ac735b0740231ef2_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:12,884 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122f46ba550a520438c9d13b2a2f478ee1c_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122f46ba550a520438c9d13b2a2f478ee1c_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:12,885 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122f96000c8899a4160b0cfeafe99c1596f_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122f96000c8899a4160b0cfeafe99c1596f_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:12,886 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122fb3dbf5ea4f148cda2d1bfc5b5224996_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122fb3dbf5ea4f148cda2d1bfc5b5224996_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:12,887 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122fcc988fa242f4f7a969d118f74d1ce05_661abb5eb8be4eaf4f236a86a23909c7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122fcc988fa242f4f7a969d118f74d1ce05_661abb5eb8be4eaf4f236a86a23909c7 2024-11-22T15:24:12,888 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-22T15:24:12,889 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=126, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T15:24:12,890 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-22T15:24:12,892 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-22T15:24:12,893 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=126, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T15:24:12,893 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-22T15:24:12,893 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732289052893"}]},"ts":"9223372036854775807"} 2024-11-22T15:24:12,896 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-22T15:24:12,896 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 661abb5eb8be4eaf4f236a86a23909c7, NAME => 'TestAcidGuarantees,,1732289023496.661abb5eb8be4eaf4f236a86a23909c7.', STARTKEY => '', ENDKEY => ''}] 2024-11-22T15:24:12,896 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-22T15:24:12,896 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732289052896"}]},"ts":"9223372036854775807"} 2024-11-22T15:24:12,904 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-22T15:24:12,909 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=126, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T15:24:12,910 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 82 msec 2024-11-22T15:24:12,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-22T15:24:12,929 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 126 completed 2024-11-22T15:24:12,938 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=242 (was 237) - Thread LEAK? -, OpenFileDescriptor=460 (was 445) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=679 (was 660) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=4055 (was 4155) 2024-11-22T15:24:12,947 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=242, OpenFileDescriptor=460, MaxFileDescriptor=1048576, SystemLoadAverage=679, ProcessCount=11, AvailableMemoryMB=4055 2024-11-22T15:24:12,948 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-22T15:24:12,948 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T15:24:12,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=127, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-22T15:24:12,949 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T15:24:12,949 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:12,949 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 127 2024-11-22T15:24:12,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-11-22T15:24:12,950 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T15:24:12,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742383_1559 (size=963) 2024-11-22T15:24:13,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-11-22T15:24:13,251 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-11-22T15:24:13,356 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690 2024-11-22T15:24:13,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742384_1560 (size=53) 2024-11-22T15:24:13,369 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T15:24:13,369 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing ed44e89acb87ffee72f4c7902667e851, disabling compactions & flushes 2024-11-22T15:24:13,369 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:13,369 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:13,369 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. after waiting 0 ms 2024-11-22T15:24:13,369 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:13,369 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 
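The drop-and-recreate cycle recorded in this stretch (DisableTableProcedure pid=122, DeleteTableProcedure pid=126, then CreateTableProcedure pid=127 with families A/B/C, VERSIONS => '1' and the ADAPTIVE compacting-memstore attribute) corresponds to ordinary Admin API calls on the client side. A hedged sketch using the public HBase 2.x client API rather than the TestAcidGuarantees harness itself; only the table name, family names and the metadata attribute are taken from the log, everything else is default:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class RecreateTestTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName name = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // disable then delete, as in procedures pid=122 and pid=126 above
            if (admin.tableExists(name)) {
                admin.disableTable(name);
                admin.deleteTable(name);
            }
            // recreate with the shape shown in CreateTableProcedure pid=127
            TableDescriptorBuilder table = TableDescriptorBuilder.newBuilder(name)
                // TABLE_ATTRIBUTES => METADATA attribute from the log
                .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
            for (String family : new String[] {"A", "B", "C"}) {
                table.setColumnFamily(ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes(family))
                    .setMaxVersions(1) // VERSIONS => '1'
                    .build());
            }
            admin.createTable(table.build());
        }
    }
}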
2024-11-22T15:24:13,369 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:13,370 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T15:24:13,370 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732289053370"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732289053370"}]},"ts":"1732289053370"} 2024-11-22T15:24:13,371 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-22T15:24:13,372 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T15:24:13,372 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732289053372"}]},"ts":"1732289053372"} 2024-11-22T15:24:13,372 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-22T15:24:13,467 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=128, ppid=127, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ed44e89acb87ffee72f4c7902667e851, ASSIGN}] 2024-11-22T15:24:13,468 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=128, ppid=127, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ed44e89acb87ffee72f4c7902667e851, ASSIGN 2024-11-22T15:24:13,468 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=128, ppid=127, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=ed44e89acb87ffee72f4c7902667e851, ASSIGN; state=OFFLINE, location=77927f992d0b,36033,1732288915809; forceNewPlan=false, retain=false 2024-11-22T15:24:13,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-11-22T15:24:13,619 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=128 updating hbase:meta row=ed44e89acb87ffee72f4c7902667e851, regionState=OPENING, regionLocation=77927f992d0b,36033,1732288915809 2024-11-22T15:24:13,620 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE; OpenRegionProcedure ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809}] 2024-11-22T15:24:13,770 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:13,772 INFO [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 
2024-11-22T15:24:13,772 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(7285): Opening region: {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} 2024-11-22T15:24:13,773 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees ed44e89acb87ffee72f4c7902667e851 2024-11-22T15:24:13,773 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T15:24:13,773 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(7327): checking encryption for ed44e89acb87ffee72f4c7902667e851 2024-11-22T15:24:13,773 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(7330): checking classloading for ed44e89acb87ffee72f4c7902667e851 2024-11-22T15:24:13,774 INFO [StoreOpener-ed44e89acb87ffee72f4c7902667e851-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region ed44e89acb87ffee72f4c7902667e851 2024-11-22T15:24:13,775 INFO [StoreOpener-ed44e89acb87ffee72f4c7902667e851-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T15:24:13,775 INFO [StoreOpener-ed44e89acb87ffee72f4c7902667e851-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ed44e89acb87ffee72f4c7902667e851 columnFamilyName A 2024-11-22T15:24:13,775 DEBUG [StoreOpener-ed44e89acb87ffee72f4c7902667e851-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:13,775 INFO [StoreOpener-ed44e89acb87ffee72f4c7902667e851-1 {}] regionserver.HStore(327): Store=ed44e89acb87ffee72f4c7902667e851/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T15:24:13,775 INFO [StoreOpener-ed44e89acb87ffee72f4c7902667e851-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region ed44e89acb87ffee72f4c7902667e851 2024-11-22T15:24:13,776 INFO [StoreOpener-ed44e89acb87ffee72f4c7902667e851-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T15:24:13,776 INFO [StoreOpener-ed44e89acb87ffee72f4c7902667e851-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ed44e89acb87ffee72f4c7902667e851 columnFamilyName B 2024-11-22T15:24:13,776 DEBUG [StoreOpener-ed44e89acb87ffee72f4c7902667e851-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:13,778 INFO [StoreOpener-ed44e89acb87ffee72f4c7902667e851-1 {}] regionserver.HStore(327): Store=ed44e89acb87ffee72f4c7902667e851/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T15:24:13,778 INFO [StoreOpener-ed44e89acb87ffee72f4c7902667e851-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region ed44e89acb87ffee72f4c7902667e851 2024-11-22T15:24:13,779 INFO [StoreOpener-ed44e89acb87ffee72f4c7902667e851-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T15:24:13,779 INFO [StoreOpener-ed44e89acb87ffee72f4c7902667e851-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ed44e89acb87ffee72f4c7902667e851 columnFamilyName C 2024-11-22T15:24:13,779 DEBUG [StoreOpener-ed44e89acb87ffee72f4c7902667e851-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:13,779 INFO [StoreOpener-ed44e89acb87ffee72f4c7902667e851-1 {}] regionserver.HStore(327): Store=ed44e89acb87ffee72f4c7902667e851/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T15:24:13,780 INFO [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:13,780 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851 2024-11-22T15:24:13,780 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851 2024-11-22T15:24:13,781 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T15:24:13,782 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(1085): writing seq id for ed44e89acb87ffee72f4c7902667e851 2024-11-22T15:24:13,784 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T15:24:13,784 INFO [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(1102): Opened ed44e89acb87ffee72f4c7902667e851; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71086966, jitterRate=0.05927833914756775}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T15:24:13,784 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(1001): Region open journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:13,785 INFO [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851., pid=129, masterSystemTime=1732289053770 2024-11-22T15:24:13,787 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:13,787 INFO [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 
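With region ed44e89acb87ffee72f4c7902667e851 now open and the table about to be marked ENABLED, the ClientService connections logged shortly after carry the test's readers and writers for testScanAtomicity. A minimal sketch of the kind of multi-family scan such a reader would issue, using the standard client Scan API; the test's actual atomicity assertions are not reproduced here:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanAllFamiliesSketch {
    /** One pass over families A, B and C; a scan-atomicity reader would compare values per row here. */
    public static void scanOnce(Connection conn) throws Exception {
        try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Scan scan = new Scan()
                .addFamily(Bytes.toBytes("A"))
                .addFamily(Bytes.toBytes("B"))
                .addFamily(Bytes.toBytes("C"));
            try (ResultScanner scanner = table.getScanner(scan)) {
                for (Result row : scanner) {
                    // each Result is a consistent row-level view returned by the scanner
                    System.out.println(Bytes.toString(row.getRow()) + " cells=" + row.rawCells().length);
                }
            }
        }
    }
}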
2024-11-22T15:24:13,787 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=128 updating hbase:meta row=ed44e89acb87ffee72f4c7902667e851, regionState=OPEN, openSeqNum=2, regionLocation=77927f992d0b,36033,1732288915809 2024-11-22T15:24:13,789 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=129, resume processing ppid=128 2024-11-22T15:24:13,789 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, ppid=128, state=SUCCESS; OpenRegionProcedure ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 in 168 msec 2024-11-22T15:24:13,790 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=128, resume processing ppid=127 2024-11-22T15:24:13,790 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, ppid=127, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=ed44e89acb87ffee72f4c7902667e851, ASSIGN in 322 msec 2024-11-22T15:24:13,790 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T15:24:13,790 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732289053790"}]},"ts":"1732289053790"} 2024-11-22T15:24:13,791 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-22T15:24:13,800 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T15:24:13,801 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 852 msec 2024-11-22T15:24:14,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-11-22T15:24:14,053 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 127 completed 2024-11-22T15:24:14,054 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x301741f1 to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@22a6e9f 2024-11-22T15:24:14,092 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c60eb7d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:24:14,094 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:24:14,095 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38180, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:24:14,095 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-22T15:24:14,096 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46194, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-22T15:24:14,098 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x63cefe40 to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@32c12a30 2024-11-22T15:24:14,109 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79b10416, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:24:14,110 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x65df2359 to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5ef40578 2024-11-22T15:24:14,117 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f142b04, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:24:14,118 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7d0ab200 to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@32bb71c 2024-11-22T15:24:14,125 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@de9f076, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:24:14,126 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5871c039 to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6bc0f7c 2024-11-22T15:24:14,143 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4414259d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:24:14,143 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7daa5922 to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1b8b6e04 2024-11-22T15:24:14,150 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ed69825, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:24:14,151 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5bc486e1 to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@11193a0c 2024-11-22T15:24:14,159 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d672ed2, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:24:14,159 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2070263a to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7861b162 2024-11-22T15:24:14,168 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7cf40102, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:24:14,169 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6050584c to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@154f0f85 2024-11-22T15:24:14,192 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@496fe03f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:24:14,193 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6dd48863 to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@8a917b 2024-11-22T15:24:14,201 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3652e74d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:24:14,202 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x51196534 to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@54c2725 2024-11-22T15:24:14,209 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2405c04e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:24:14,216 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:24:14,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees 2024-11-22T15:24:14,217 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:24:14,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-22T15:24:14,217 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=130, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:24:14,217 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:24:14,223 DEBUG [hconnection-0x29adc8bc-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:24:14,224 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38182, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:24:14,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed44e89acb87ffee72f4c7902667e851 2024-11-22T15:24:14,232 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed44e89acb87ffee72f4c7902667e851 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-22T15:24:14,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=A 2024-11-22T15:24:14,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:14,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=B 2024-11-22T15:24:14,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:14,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=C 2024-11-22T15:24:14,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:14,239 DEBUG [hconnection-0x1e7852a1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:24:14,239 DEBUG [hconnection-0x579eb214-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:24:14,240 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38188, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:24:14,240 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38198, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:24:14,243 DEBUG [hconnection-0x3cd3b54-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:24:14,243 DEBUG [hconnection-0x3236007c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:24:14,244 DEBUG [hconnection-0x5afc78c1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:24:14,244 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38202, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:24:14,244 INFO [RS-EventLoopGroup-3-2 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38230, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:24:14,245 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38200, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:24:14,245 DEBUG [hconnection-0x1dd6ffd7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:24:14,246 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38234, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:24:14,251 DEBUG [hconnection-0x506cebd5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:24:14,252 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38238, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:24:14,254 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:14,254 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:14,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289114254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:14,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289114254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:14,255 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:14,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289114255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:14,275 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/c99f64c4c72549448cf6f01da8e45ca6 is 50, key is test_row_0/A:col10/1732289054230/Put/seqid=0 2024-11-22T15:24:14,307 DEBUG [hconnection-0x6ae177cd-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:24:14,308 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38240, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:24:14,309 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:14,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38240 deadline: 1732289114309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:14,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-22T15:24:14,356 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:14,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289114355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:14,356 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:14,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289114356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:14,356 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:14,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289114355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:14,368 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:14,369 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-22T15:24:14,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:14,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:14,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:14,369 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:24:14,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:14,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:14,379 DEBUG [hconnection-0x176b3278-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:24:14,380 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38250, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:24:14,381 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:14,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289114381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:14,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742385_1561 (size=9657) 2024-11-22T15:24:14,395 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/c99f64c4c72549448cf6f01da8e45ca6 2024-11-22T15:24:14,410 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:14,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38240 deadline: 1732289114410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:14,468 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/730ea2680882415d9aaf5a383137fbb6 is 50, key is test_row_0/B:col10/1732289054230/Put/seqid=0 2024-11-22T15:24:14,487 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:14,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289114484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:14,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742386_1562 (size=9657) 2024-11-22T15:24:14,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-22T15:24:14,531 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:14,531 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-22T15:24:14,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:14,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:14,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:14,531 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:14,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:14,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:14,557 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:14,557 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:14,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289114557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:14,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289114557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:14,558 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:14,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289114557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:14,616 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:14,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38240 deadline: 1732289114613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:14,684 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:14,684 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-22T15:24:14,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:14,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:14,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:14,684 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:14,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:14,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:14,689 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:14,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289114689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:14,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-22T15:24:14,838 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:14,838 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-22T15:24:14,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:14,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:14,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:14,839 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:24:14,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:14,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:14,859 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:14,859 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:14,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289114859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:14,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289114859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:14,861 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:14,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289114861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:14,905 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/730ea2680882415d9aaf5a383137fbb6 2024-11-22T15:24:14,919 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:14,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38240 deadline: 1732289114918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:14,948 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/9f8dcf82581b4fb281262fda41374a35 is 50, key is test_row_0/C:col10/1732289054230/Put/seqid=0 2024-11-22T15:24:14,991 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:14,991 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-22T15:24:14,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:14,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:14,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:14,992 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:14,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:14,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:14,994 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:14,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289114994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:15,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742387_1563 (size=9657) 2024-11-22T15:24:15,144 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:15,144 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-22T15:24:15,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 
2024-11-22T15:24:15,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:15,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:15,145 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:15,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:24:15,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:15,298 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:15,298 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-22T15:24:15,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:15,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 
as already flushing 2024-11-22T15:24:15,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:15,301 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:15,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:15,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:15,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-22T15:24:15,366 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:15,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289115360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:15,368 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:15,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289115364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:15,374 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:15,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289115366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:15,423 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/9f8dcf82581b4fb281262fda41374a35 2024-11-22T15:24:15,427 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/c99f64c4c72549448cf6f01da8e45ca6 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/c99f64c4c72549448cf6f01da8e45ca6 2024-11-22T15:24:15,431 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/c99f64c4c72549448cf6f01da8e45ca6, entries=100, sequenceid=12, filesize=9.4 K 2024-11-22T15:24:15,432 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:15,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38240 deadline: 1732289115424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:15,433 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/730ea2680882415d9aaf5a383137fbb6 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/730ea2680882415d9aaf5a383137fbb6 2024-11-22T15:24:15,436 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/730ea2680882415d9aaf5a383137fbb6, entries=100, sequenceid=12, filesize=9.4 K 2024-11-22T15:24:15,437 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/9f8dcf82581b4fb281262fda41374a35 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/9f8dcf82581b4fb281262fda41374a35 2024-11-22T15:24:15,441 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/9f8dcf82581b4fb281262fda41374a35, entries=100, sequenceid=12, filesize=9.4 K 2024-11-22T15:24:15,442 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=161.02 KB/164880 for ed44e89acb87ffee72f4c7902667e851 in 1210ms, sequenceid=12, compaction requested=false 2024-11-22T15:24:15,442 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:15,453 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:15,454 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-22T15:24:15,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:15,454 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2837): Flushing ed44e89acb87ffee72f4c7902667e851 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-22T15:24:15,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=A 2024-11-22T15:24:15,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:15,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=B 2024-11-22T15:24:15,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:15,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=C 2024-11-22T15:24:15,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:15,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/0abad8afff374a02ba9e4e0dca683fab is 50, key is test_row_0/A:col10/1732289054252/Put/seqid=0 2024-11-22T15:24:15,507 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 
as already flushing 2024-11-22T15:24:15,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed44e89acb87ffee72f4c7902667e851 2024-11-22T15:24:15,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742388_1564 (size=12001) 2024-11-22T15:24:15,535 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/0abad8afff374a02ba9e4e0dca683fab 2024-11-22T15:24:15,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/c0b105654a6042f8a736c7bfbaa5e208 is 50, key is test_row_0/B:col10/1732289054252/Put/seqid=0 2024-11-22T15:24:15,555 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:15,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289115551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:15,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742389_1565 (size=12001) 2024-11-22T15:24:15,576 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/c0b105654a6042f8a736c7bfbaa5e208 2024-11-22T15:24:15,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/a78c87c84cd34aec8f9c0ed75ab9d923 is 50, key is test_row_0/C:col10/1732289054252/Put/seqid=0 2024-11-22T15:24:15,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742390_1566 (size=12001) 2024-11-22T15:24:15,635 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/a78c87c84cd34aec8f9c0ed75ab9d923 2024-11-22T15:24:15,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/0abad8afff374a02ba9e4e0dca683fab as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/0abad8afff374a02ba9e4e0dca683fab 2024-11-22T15:24:15,646 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/0abad8afff374a02ba9e4e0dca683fab, entries=150, sequenceid=39, filesize=11.7 K 2024-11-22T15:24:15,647 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/c0b105654a6042f8a736c7bfbaa5e208 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/c0b105654a6042f8a736c7bfbaa5e208 2024-11-22T15:24:15,651 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/c0b105654a6042f8a736c7bfbaa5e208, entries=150, sequenceid=39, filesize=11.7 K 2024-11-22T15:24:15,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/a78c87c84cd34aec8f9c0ed75ab9d923 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/a78c87c84cd34aec8f9c0ed75ab9d923 2024-11-22T15:24:15,657 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/a78c87c84cd34aec8f9c0ed75ab9d923, entries=150, sequenceid=39, filesize=11.7 K 2024-11-22T15:24:15,658 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for ed44e89acb87ffee72f4c7902667e851 in 204ms, sequenceid=39, compaction requested=false 2024-11-22T15:24:15,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2538): Flush status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:15,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 
2024-11-22T15:24:15,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=131 2024-11-22T15:24:15,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=131 2024-11-22T15:24:15,662 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-11-22T15:24:15,662 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4430 sec 2024-11-22T15:24:15,663 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees in 1.4470 sec 2024-11-22T15:24:15,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed44e89acb87ffee72f4c7902667e851 2024-11-22T15:24:15,669 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed44e89acb87ffee72f4c7902667e851 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-22T15:24:15,669 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=A 2024-11-22T15:24:15,669 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:15,669 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=B 2024-11-22T15:24:15,670 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:15,670 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=C 2024-11-22T15:24:15,670 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:15,693 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/68b4affc247148819f809cf4ad5917d5 is 50, key is test_row_0/A:col10/1732289055660/Put/seqid=0 2024-11-22T15:24:15,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742391_1567 (size=14341) 2024-11-22T15:24:15,747 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/68b4affc247148819f809cf4ad5917d5 2024-11-22T15:24:15,767 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/a94976aa10b640cca2d897dc5ba0f220 is 50, key is test_row_0/B:col10/1732289055660/Put/seqid=0 2024-11-22T15:24:15,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to 
blk_1073742392_1568 (size=12001) 2024-11-22T15:24:15,888 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:15,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289115881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:15,997 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:15,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289115990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:16,206 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:16,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289116198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:16,209 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/a94976aa10b640cca2d897dc5ba0f220 2024-11-22T15:24:16,216 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/feea95b80ed14a159d59b792ca3d9223 is 50, key is test_row_0/C:col10/1732289055660/Put/seqid=0 2024-11-22T15:24:16,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742393_1569 (size=12001) 2024-11-22T15:24:16,246 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/feea95b80ed14a159d59b792ca3d9223 2024-11-22T15:24:16,253 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/68b4affc247148819f809cf4ad5917d5 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/68b4affc247148819f809cf4ad5917d5 2024-11-22T15:24:16,258 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/68b4affc247148819f809cf4ad5917d5, entries=200, sequenceid=50, filesize=14.0 K 2024-11-22T15:24:16,263 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/a94976aa10b640cca2d897dc5ba0f220 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/a94976aa10b640cca2d897dc5ba0f220 2024-11-22T15:24:16,266 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/a94976aa10b640cca2d897dc5ba0f220, entries=150, sequenceid=50, filesize=11.7 K 2024-11-22T15:24:16,267 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/feea95b80ed14a159d59b792ca3d9223 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/feea95b80ed14a159d59b792ca3d9223 2024-11-22T15:24:16,270 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/feea95b80ed14a159d59b792ca3d9223, entries=150, sequenceid=50, filesize=11.7 K 2024-11-22T15:24:16,271 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for ed44e89acb87ffee72f4c7902667e851 in 602ms, sequenceid=50, compaction requested=true 2024-11-22T15:24:16,271 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:16,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed44e89acb87ffee72f4c7902667e851:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:24:16,271 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:24:16,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:16,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed44e89acb87ffee72f4c7902667e851:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:24:16,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:16,271 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:24:16,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed44e89acb87ffee72f4c7902667e851:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:24:16,271 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:24:16,273 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:24:16,273 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): 
ed44e89acb87ffee72f4c7902667e851/B is initiating minor compaction (all files) 2024-11-22T15:24:16,273 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed44e89acb87ffee72f4c7902667e851/B in TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:16,273 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/730ea2680882415d9aaf5a383137fbb6, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/c0b105654a6042f8a736c7bfbaa5e208, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/a94976aa10b640cca2d897dc5ba0f220] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp, totalSize=32.9 K 2024-11-22T15:24:16,274 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35999 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:24:16,274 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): ed44e89acb87ffee72f4c7902667e851/A is initiating minor compaction (all files) 2024-11-22T15:24:16,274 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 730ea2680882415d9aaf5a383137fbb6, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732289054230 2024-11-22T15:24:16,274 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed44e89acb87ffee72f4c7902667e851/A in TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 
2024-11-22T15:24:16,274 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/c99f64c4c72549448cf6f01da8e45ca6, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/0abad8afff374a02ba9e4e0dca683fab, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/68b4affc247148819f809cf4ad5917d5] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp, totalSize=35.2 K 2024-11-22T15:24:16,276 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting c0b105654a6042f8a736c7bfbaa5e208, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732289054248 2024-11-22T15:24:16,276 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting c99f64c4c72549448cf6f01da8e45ca6, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732289054230 2024-11-22T15:24:16,276 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0abad8afff374a02ba9e4e0dca683fab, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732289054248 2024-11-22T15:24:16,276 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting a94976aa10b640cca2d897dc5ba0f220, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732289055540 2024-11-22T15:24:16,276 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 68b4affc247148819f809cf4ad5917d5, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732289055540 2024-11-22T15:24:16,297 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed44e89acb87ffee72f4c7902667e851#A#compaction#478 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:16,297 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/a3e996a1cb524298bfe48dfad09a1fac is 50, key is test_row_0/A:col10/1732289055660/Put/seqid=0 2024-11-22T15:24:16,309 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed44e89acb87ffee72f4c7902667e851#B#compaction#479 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:16,309 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/ccced7bbc5a947689f511f1dc25cb5e3 is 50, key is test_row_0/B:col10/1732289055660/Put/seqid=0 2024-11-22T15:24:16,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-22T15:24:16,321 INFO [Thread-2464 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 130 completed 2024-11-22T15:24:16,322 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:24:16,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees 2024-11-22T15:24:16,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-22T15:24:16,323 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:24:16,323 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:24:16,324 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:24:16,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742394_1570 (size=12104) 2024-11-22T15:24:16,339 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/a3e996a1cb524298bfe48dfad09a1fac as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/a3e996a1cb524298bfe48dfad09a1fac 2024-11-22T15:24:16,344 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed44e89acb87ffee72f4c7902667e851/A of ed44e89acb87ffee72f4c7902667e851 into a3e996a1cb524298bfe48dfad09a1fac(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:24:16,344 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:16,344 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851., storeName=ed44e89acb87ffee72f4c7902667e851/A, priority=13, startTime=1732289056271; duration=0sec 2024-11-22T15:24:16,344 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:24:16,344 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed44e89acb87ffee72f4c7902667e851:A 2024-11-22T15:24:16,344 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:24:16,345 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:24:16,345 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): ed44e89acb87ffee72f4c7902667e851/C is initiating minor compaction (all files) 2024-11-22T15:24:16,345 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed44e89acb87ffee72f4c7902667e851/C in TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:16,345 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/9f8dcf82581b4fb281262fda41374a35, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/a78c87c84cd34aec8f9c0ed75ab9d923, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/feea95b80ed14a159d59b792ca3d9223] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp, totalSize=32.9 K 2024-11-22T15:24:16,346 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9f8dcf82581b4fb281262fda41374a35, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732289054230 2024-11-22T15:24:16,346 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting a78c87c84cd34aec8f9c0ed75ab9d923, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732289054248 2024-11-22T15:24:16,346 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting feea95b80ed14a159d59b792ca3d9223, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732289055540 2024-11-22T15:24:16,364 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
ed44e89acb87ffee72f4c7902667e851#C#compaction#480 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:16,365 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/bc12ee2135ba4f58bdc14283f26cb37b is 50, key is test_row_0/C:col10/1732289055660/Put/seqid=0 2024-11-22T15:24:16,380 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed44e89acb87ffee72f4c7902667e851 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-22T15:24:16,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=A 2024-11-22T15:24:16,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:16,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=B 2024-11-22T15:24:16,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:16,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=C 2024-11-22T15:24:16,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:16,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed44e89acb87ffee72f4c7902667e851 2024-11-22T15:24:16,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742395_1571 (size=12104) 2024-11-22T15:24:16,402 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:16,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289116393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:16,411 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/139c6eff4860440892562b5638c087d4 is 50, key is test_row_0/A:col10/1732289055857/Put/seqid=0 2024-11-22T15:24:16,411 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:16,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289116402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:16,411 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:16,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289116402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:16,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-22T15:24:16,440 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:16,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38240 deadline: 1732289116434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:16,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742396_1572 (size=12104) 2024-11-22T15:24:16,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742397_1573 (size=14341) 2024-11-22T15:24:16,476 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:16,476 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/139c6eff4860440892562b5638c087d4 2024-11-22T15:24:16,476 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-22T15:24:16,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:16,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:16,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:16,477 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:16,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:16,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:16,498 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/e45f5588a0e24342907f275f8818badf is 50, key is test_row_0/B:col10/1732289055857/Put/seqid=0 2024-11-22T15:24:16,507 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:16,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289116503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:16,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742398_1574 (size=12001) 2024-11-22T15:24:16,511 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/e45f5588a0e24342907f275f8818badf 2024-11-22T15:24:16,511 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:16,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289116508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:16,517 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:16,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289116512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:16,517 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:16,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289116512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:16,520 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/12fc72e6ee604b6a9ca3e3bb53856991 is 50, key is test_row_0/C:col10/1732289055857/Put/seqid=0 2024-11-22T15:24:16,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742399_1575 (size=12001) 2024-11-22T15:24:16,550 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/12fc72e6ee604b6a9ca3e3bb53856991 2024-11-22T15:24:16,555 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/139c6eff4860440892562b5638c087d4 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/139c6eff4860440892562b5638c087d4 2024-11-22T15:24:16,559 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/139c6eff4860440892562b5638c087d4, entries=200, sequenceid=77, filesize=14.0 K 2024-11-22T15:24:16,560 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/e45f5588a0e24342907f275f8818badf as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/e45f5588a0e24342907f275f8818badf 2024-11-22T15:24:16,564 
INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/e45f5588a0e24342907f275f8818badf, entries=150, sequenceid=77, filesize=11.7 K 2024-11-22T15:24:16,565 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/12fc72e6ee604b6a9ca3e3bb53856991 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/12fc72e6ee604b6a9ca3e3bb53856991 2024-11-22T15:24:16,571 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/12fc72e6ee604b6a9ca3e3bb53856991, entries=150, sequenceid=77, filesize=11.7 K 2024-11-22T15:24:16,572 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for ed44e89acb87ffee72f4c7902667e851 in 192ms, sequenceid=77, compaction requested=false 2024-11-22T15:24:16,572 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:16,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-22T15:24:16,628 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:16,629 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-22T15:24:16,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 
2024-11-22T15:24:16,630 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2837): Flushing ed44e89acb87ffee72f4c7902667e851 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-22T15:24:16,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=A 2024-11-22T15:24:16,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:16,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=B 2024-11-22T15:24:16,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:16,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=C 2024-11-22T15:24:16,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:16,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/cf8aa59544c5494c9213c938cfdc3957 is 50, key is test_row_0/A:col10/1732289056400/Put/seqid=0 2024-11-22T15:24:16,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742400_1576 (size=12001) 2024-11-22T15:24:16,671 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=87 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/cf8aa59544c5494c9213c938cfdc3957 2024-11-22T15:24:16,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/678e2c356a274cc5ba19bbe9f5781e12 is 50, key is test_row_0/B:col10/1732289056400/Put/seqid=0 2024-11-22T15:24:16,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742401_1577 (size=12001) 2024-11-22T15:24:16,710 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=87 (bloomFilter=true), 
to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/678e2c356a274cc5ba19bbe9f5781e12 2024-11-22T15:24:16,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/0f48028a913f4bfb920eddac6b8f8122 is 50, key is test_row_0/C:col10/1732289056400/Put/seqid=0 2024-11-22T15:24:16,718 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:16,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed44e89acb87ffee72f4c7902667e851 2024-11-22T15:24:16,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742402_1578 (size=12001) 2024-11-22T15:24:16,794 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/ccced7bbc5a947689f511f1dc25cb5e3 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/ccced7bbc5a947689f511f1dc25cb5e3 2024-11-22T15:24:16,817 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed44e89acb87ffee72f4c7902667e851/B of ed44e89acb87ffee72f4c7902667e851 into ccced7bbc5a947689f511f1dc25cb5e3(size=11.8 K), total size for store is 23.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:24:16,817 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:16,817 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851., storeName=ed44e89acb87ffee72f4c7902667e851/B, priority=13, startTime=1732289056271; duration=0sec 2024-11-22T15:24:16,817 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:16,817 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed44e89acb87ffee72f4c7902667e851:B 2024-11-22T15:24:16,851 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:16,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289116841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:16,857 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/bc12ee2135ba4f58bdc14283f26cb37b as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/bc12ee2135ba4f58bdc14283f26cb37b 2024-11-22T15:24:16,862 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:16,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289116851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:16,863 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:16,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289116852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:16,865 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed44e89acb87ffee72f4c7902667e851/C of ed44e89acb87ffee72f4c7902667e851 into bc12ee2135ba4f58bdc14283f26cb37b(size=11.8 K), total size for store is 23.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:24:16,865 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:16,865 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851., storeName=ed44e89acb87ffee72f4c7902667e851/C, priority=13, startTime=1732289056271; duration=0sec 2024-11-22T15:24:16,865 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:16,865 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed44e89acb87ffee72f4c7902667e851:C 2024-11-22T15:24:16,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-22T15:24:16,958 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:16,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289116952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:16,968 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:16,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289116963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:16,968 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:16,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289116965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:17,014 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:17,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289117012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:17,149 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=87 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/0f48028a913f4bfb920eddac6b8f8122 2024-11-22T15:24:17,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/cf8aa59544c5494c9213c938cfdc3957 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/cf8aa59544c5494c9213c938cfdc3957 2024-11-22T15:24:17,157 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/cf8aa59544c5494c9213c938cfdc3957, entries=150, sequenceid=87, filesize=11.7 K 2024-11-22T15:24:17,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/678e2c356a274cc5ba19bbe9f5781e12 as 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/678e2c356a274cc5ba19bbe9f5781e12 2024-11-22T15:24:17,162 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/678e2c356a274cc5ba19bbe9f5781e12, entries=150, sequenceid=87, filesize=11.7 K 2024-11-22T15:24:17,162 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:17,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289117161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:17,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/0f48028a913f4bfb920eddac6b8f8122 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/0f48028a913f4bfb920eddac6b8f8122 2024-11-22T15:24:17,166 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/0f48028a913f4bfb920eddac6b8f8122, entries=150, sequenceid=87, filesize=11.7 K 2024-11-22T15:24:17,167 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] 
regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=161.02 KB/164880 for ed44e89acb87ffee72f4c7902667e851 in 538ms, sequenceid=87, compaction requested=true 2024-11-22T15:24:17,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2538): Flush status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:17,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:17,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=133 2024-11-22T15:24:17,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=133 2024-11-22T15:24:17,169 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-11-22T15:24:17,169 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 845 msec 2024-11-22T15:24:17,171 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees in 848 msec 2024-11-22T15:24:17,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed44e89acb87ffee72f4c7902667e851 2024-11-22T15:24:17,181 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed44e89acb87ffee72f4c7902667e851 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-11-22T15:24:17,182 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=A 2024-11-22T15:24:17,182 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:17,182 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=B 2024-11-22T15:24:17,182 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:17,182 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=C 2024-11-22T15:24:17,182 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:17,192 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/d73ea038e77a4b8db4f5a7407d5bbed3 is 50, key is test_row_0/A:col10/1732289057177/Put/seqid=0 2024-11-22T15:24:17,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742403_1579 (size=16681) 2024-11-22T15:24:17,208 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:17,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289117204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:17,213 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:17,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289117205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:17,313 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:17,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289117310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:17,321 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:17,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289117315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:17,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-22T15:24:17,426 INFO [Thread-2464 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 132 completed 2024-11-22T15:24:17,427 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:24:17,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees 2024-11-22T15:24:17,429 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:24:17,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-22T15:24:17,429 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:24:17,429 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:24:17,468 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:17,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289117464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:17,522 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:17,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289117516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:17,528 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:17,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289117523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:17,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-22T15:24:17,581 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:17,581 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-22T15:24:17,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:17,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:17,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:17,582 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:17,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:17,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:17,608 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/d73ea038e77a4b8db4f5a7407d5bbed3 2024-11-22T15:24:17,615 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/37e204bec45e4342af1a6ee4ef4f1522 is 50, key is test_row_0/B:col10/1732289057177/Put/seqid=0 2024-11-22T15:24:17,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742404_1580 (size=12001) 2024-11-22T15:24:17,658 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/37e204bec45e4342af1a6ee4ef4f1522 2024-11-22T15:24:17,688 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/5dc3a98dc97b4a41a1641f1dfb072bc7 is 50, key is test_row_0/C:col10/1732289057177/Put/seqid=0 2024-11-22T15:24:17,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742405_1581 (size=12001) 2024-11-22T15:24:17,725 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=118 (bloomFilter=true), 
to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/5dc3a98dc97b4a41a1641f1dfb072bc7 2024-11-22T15:24:17,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-22T15:24:17,731 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/d73ea038e77a4b8db4f5a7407d5bbed3 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/d73ea038e77a4b8db4f5a7407d5bbed3 2024-11-22T15:24:17,734 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:17,734 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-22T15:24:17,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:17,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:17,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:17,734 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:24:17,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:17,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:17,738 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/d73ea038e77a4b8db4f5a7407d5bbed3, entries=250, sequenceid=118, filesize=16.3 K 2024-11-22T15:24:17,739 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/37e204bec45e4342af1a6ee4ef4f1522 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/37e204bec45e4342af1a6ee4ef4f1522 2024-11-22T15:24:17,747 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/37e204bec45e4342af1a6ee4ef4f1522, entries=150, sequenceid=118, filesize=11.7 K 2024-11-22T15:24:17,747 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/5dc3a98dc97b4a41a1641f1dfb072bc7 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/5dc3a98dc97b4a41a1641f1dfb072bc7 2024-11-22T15:24:17,754 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/5dc3a98dc97b4a41a1641f1dfb072bc7, entries=150, sequenceid=118, filesize=11.7 K 2024-11-22T15:24:17,755 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for ed44e89acb87ffee72f4c7902667e851 in 574ms, sequenceid=118, compaction requested=true 2024-11-22T15:24:17,755 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:17,755 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T15:24:17,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed44e89acb87ffee72f4c7902667e851:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:24:17,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:17,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed44e89acb87ffee72f4c7902667e851:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:24:17,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: 
MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:17,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed44e89acb87ffee72f4c7902667e851:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:24:17,756 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T15:24:17,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:24:17,756 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 55127 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T15:24:17,757 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): ed44e89acb87ffee72f4c7902667e851/A is initiating minor compaction (all files) 2024-11-22T15:24:17,757 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed44e89acb87ffee72f4c7902667e851/A in TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:17,757 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/a3e996a1cb524298bfe48dfad09a1fac, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/139c6eff4860440892562b5638c087d4, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/cf8aa59544c5494c9213c938cfdc3957, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/d73ea038e77a4b8db4f5a7407d5bbed3] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp, totalSize=53.8 K 2024-11-22T15:24:17,757 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting a3e996a1cb524298bfe48dfad09a1fac, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732289055540 2024-11-22T15:24:17,757 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 139c6eff4860440892562b5638c087d4, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732289055857 2024-11-22T15:24:17,758 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting cf8aa59544c5494c9213c938cfdc3957, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1732289056392 2024-11-22T15:24:17,758 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T15:24:17,758 DEBUG 
[RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): ed44e89acb87ffee72f4c7902667e851/B is initiating minor compaction (all files) 2024-11-22T15:24:17,758 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed44e89acb87ffee72f4c7902667e851/B in TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:17,758 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/ccced7bbc5a947689f511f1dc25cb5e3, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/e45f5588a0e24342907f275f8818badf, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/678e2c356a274cc5ba19bbe9f5781e12, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/37e204bec45e4342af1a6ee4ef4f1522] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp, totalSize=47.0 K 2024-11-22T15:24:17,758 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting d73ea038e77a4b8db4f5a7407d5bbed3, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732289056776 2024-11-22T15:24:17,759 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting ccced7bbc5a947689f511f1dc25cb5e3, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732289055540 2024-11-22T15:24:17,760 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting e45f5588a0e24342907f275f8818badf, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732289055857 2024-11-22T15:24:17,761 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 678e2c356a274cc5ba19bbe9f5781e12, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1732289056392 2024-11-22T15:24:17,762 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 37e204bec45e4342af1a6ee4ef4f1522, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732289056848 2024-11-22T15:24:17,783 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed44e89acb87ffee72f4c7902667e851#B#compaction#490 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:17,784 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/162412ee005f4050bd62bcdf49e107de is 50, key is test_row_0/B:col10/1732289057177/Put/seqid=0 2024-11-22T15:24:17,788 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed44e89acb87ffee72f4c7902667e851#A#compaction#491 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:17,788 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/28c6b400ca75493291b518ef55e726fd is 50, key is test_row_0/A:col10/1732289057177/Put/seqid=0 2024-11-22T15:24:17,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742406_1582 (size=12241) 2024-11-22T15:24:17,807 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/162412ee005f4050bd62bcdf49e107de as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/162412ee005f4050bd62bcdf49e107de 2024-11-22T15:24:17,811 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ed44e89acb87ffee72f4c7902667e851/B of ed44e89acb87ffee72f4c7902667e851 into 162412ee005f4050bd62bcdf49e107de(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:24:17,811 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:17,811 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851., storeName=ed44e89acb87ffee72f4c7902667e851/B, priority=12, startTime=1732289057756; duration=0sec 2024-11-22T15:24:17,811 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:24:17,811 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed44e89acb87ffee72f4c7902667e851:B 2024-11-22T15:24:17,811 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T15:24:17,815 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T15:24:17,815 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): ed44e89acb87ffee72f4c7902667e851/C is initiating minor compaction (all files) 2024-11-22T15:24:17,815 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed44e89acb87ffee72f4c7902667e851/C in TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:17,816 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/bc12ee2135ba4f58bdc14283f26cb37b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/12fc72e6ee604b6a9ca3e3bb53856991, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/0f48028a913f4bfb920eddac6b8f8122, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/5dc3a98dc97b4a41a1641f1dfb072bc7] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp, totalSize=47.0 K 2024-11-22T15:24:17,816 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting bc12ee2135ba4f58bdc14283f26cb37b, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732289055540 2024-11-22T15:24:17,817 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 12fc72e6ee604b6a9ca3e3bb53856991, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732289055857 2024-11-22T15:24:17,817 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 0f48028a913f4bfb920eddac6b8f8122, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, 
compression=NONE, seqNum=87, earliestPutTs=1732289056392 2024-11-22T15:24:17,818 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 5dc3a98dc97b4a41a1641f1dfb072bc7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732289056848 2024-11-22T15:24:17,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742407_1583 (size=12241) 2024-11-22T15:24:17,834 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed44e89acb87ffee72f4c7902667e851#C#compaction#492 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:17,834 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/70b31ad5aca14501a72592af90972a8c is 50, key is test_row_0/C:col10/1732289057177/Put/seqid=0 2024-11-22T15:24:17,843 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed44e89acb87ffee72f4c7902667e851 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-22T15:24:17,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed44e89acb87ffee72f4c7902667e851 2024-11-22T15:24:17,844 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=A 2024-11-22T15:24:17,844 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:17,844 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=B 2024-11-22T15:24:17,844 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:17,844 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=C 2024-11-22T15:24:17,844 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:17,856 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/5fc8ba1542404371baef55275918ddc1 is 50, key is test_row_0/A:col10/1732289057828/Put/seqid=0 2024-11-22T15:24:17,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742409_1585 (size=14441) 2024-11-22T15:24:17,884 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/5fc8ba1542404371baef55275918ddc1 2024-11-22T15:24:17,886 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:17,887 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-22T15:24:17,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:17,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:17,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:17,887 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:17,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:24:17,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:24:17,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742408_1584 (size=12241) 2024-11-22T15:24:17,917 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/094a45bc21d44feca29faf5a95812ba0 is 50, key is test_row_0/B:col10/1732289057828/Put/seqid=0 2024-11-22T15:24:17,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742410_1586 (size=12051) 2024-11-22T15:24:17,936 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/094a45bc21d44feca29faf5a95812ba0 2024-11-22T15:24:17,941 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:17,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289117934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:17,947 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:17,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289117936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:17,950 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/ff8443efcd3240fcb7f8c10cbcfa6474 is 50, key is test_row_0/C:col10/1732289057828/Put/seqid=0 2024-11-22T15:24:17,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742411_1587 (size=12051) 2024-11-22T15:24:17,965 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/ff8443efcd3240fcb7f8c10cbcfa6474 2024-11-22T15:24:17,968 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/5fc8ba1542404371baef55275918ddc1 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/5fc8ba1542404371baef55275918ddc1 2024-11-22T15:24:17,971 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/5fc8ba1542404371baef55275918ddc1, entries=200, sequenceid=131, filesize=14.1 K 2024-11-22T15:24:17,973 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/094a45bc21d44feca29faf5a95812ba0 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/094a45bc21d44feca29faf5a95812ba0 
2024-11-22T15:24:17,976 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/094a45bc21d44feca29faf5a95812ba0, entries=150, sequenceid=131, filesize=11.8 K 2024-11-22T15:24:17,977 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/ff8443efcd3240fcb7f8c10cbcfa6474 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/ff8443efcd3240fcb7f8c10cbcfa6474 2024-11-22T15:24:17,981 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/ff8443efcd3240fcb7f8c10cbcfa6474, entries=150, sequenceid=131, filesize=11.8 K 2024-11-22T15:24:17,982 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for ed44e89acb87ffee72f4c7902667e851 in 138ms, sequenceid=131, compaction requested=false 2024-11-22T15:24:17,982 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:17,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed44e89acb87ffee72f4c7902667e851 2024-11-22T15:24:17,990 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed44e89acb87ffee72f4c7902667e851 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-22T15:24:17,990 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=A 2024-11-22T15:24:17,990 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:17,990 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=B 2024-11-22T15:24:17,990 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:17,990 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=C 2024-11-22T15:24:17,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:17,995 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/17c66d7f70d34b529a64c341b49f9443 is 50, key is test_row_0/A:col10/1732289057925/Put/seqid=0 2024-11-22T15:24:18,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-22T15:24:18,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742412_1588 (size=14541) 2024-11-22T15:24:18,037 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/17c66d7f70d34b529a64c341b49f9443 2024-11-22T15:24:18,039 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:18,040 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-22T15:24:18,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:18,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:18,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:18,040 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:18,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:18,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:18,049 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/7525b018f71f4cfab94babb1cf548031 is 50, key is test_row_0/B:col10/1732289057925/Put/seqid=0 2024-11-22T15:24:18,052 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:18,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289118043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:18,061 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:18,061 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:18,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289118049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:18,061 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:18,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289118048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:18,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289118050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:18,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742413_1589 (size=12151) 2024-11-22T15:24:18,093 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/7525b018f71f4cfab94babb1cf548031 2024-11-22T15:24:18,104 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/df537eec9935473ea63fd39ba7109178 is 50, key is test_row_0/C:col10/1732289057925/Put/seqid=0 2024-11-22T15:24:18,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742414_1590 (size=12151) 2024-11-22T15:24:18,139 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/df537eec9935473ea63fd39ba7109178 2024-11-22T15:24:18,144 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/17c66d7f70d34b529a64c341b49f9443 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/17c66d7f70d34b529a64c341b49f9443 2024-11-22T15:24:18,161 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/17c66d7f70d34b529a64c341b49f9443, 
entries=200, sequenceid=156, filesize=14.2 K 2024-11-22T15:24:18,162 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/7525b018f71f4cfab94babb1cf548031 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/7525b018f71f4cfab94babb1cf548031 2024-11-22T15:24:18,165 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/7525b018f71f4cfab94babb1cf548031, entries=150, sequenceid=156, filesize=11.9 K 2024-11-22T15:24:18,166 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/df537eec9935473ea63fd39ba7109178 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/df537eec9935473ea63fd39ba7109178 2024-11-22T15:24:18,170 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/df537eec9935473ea63fd39ba7109178, entries=150, sequenceid=156, filesize=11.9 K 2024-11-22T15:24:18,170 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for ed44e89acb87ffee72f4c7902667e851 in 181ms, sequenceid=156, compaction requested=true 2024-11-22T15:24:18,171 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:18,171 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed44e89acb87ffee72f4c7902667e851:A, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:24:18,171 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:24:18,171 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed44e89acb87ffee72f4c7902667e851:B, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:24:18,171 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-22T15:24:18,171 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed44e89acb87ffee72f4c7902667e851:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:24:18,171 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-22T15:24:18,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed44e89acb87ffee72f4c7902667e851 2024-11-22T15:24:18,174 INFO 
[MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed44e89acb87ffee72f4c7902667e851 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-22T15:24:18,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=A 2024-11-22T15:24:18,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:18,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=B 2024-11-22T15:24:18,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:18,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=C 2024-11-22T15:24:18,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:18,189 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-22T15:24:18,193 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:18,193 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-22T15:24:18,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:18,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:18,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:18,194 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:24:18,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:18,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:18,199 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/12a5cf6595de422bb29581d3a582eb82 is 50, key is test_row_0/A:col10/1732289058045/Put/seqid=0 2024-11-22T15:24:18,223 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/28c6b400ca75493291b518ef55e726fd as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/28c6b400ca75493291b518ef55e726fd 2024-11-22T15:24:18,228 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ed44e89acb87ffee72f4c7902667e851/A of ed44e89acb87ffee72f4c7902667e851 into 28c6b400ca75493291b518ef55e726fd(size=12.0 K), total size for store is 40.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:24:18,228 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:18,228 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851., storeName=ed44e89acb87ffee72f4c7902667e851/A, priority=12, startTime=1732289057755; duration=0sec 2024-11-22T15:24:18,228 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-22T15:24:18,228 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed44e89acb87ffee72f4c7902667e851:A 2024-11-22T15:24:18,228 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed44e89acb87ffee72f4c7902667e851:A 2024-11-22T15:24:18,228 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 4 compacting, 2 eligible, 16 blocking 2024-11-22T15:24:18,229 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-22T15:24:18,229 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-22T15:24:18,229 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 
because compaction request was cancelled 2024-11-22T15:24:18,229 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed44e89acb87ffee72f4c7902667e851:C 2024-11-22T15:24:18,229 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:24:18,229 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 41223 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:24:18,229 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): ed44e89acb87ffee72f4c7902667e851/A is initiating minor compaction (all files) 2024-11-22T15:24:18,229 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed44e89acb87ffee72f4c7902667e851/A in TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:18,229 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/28c6b400ca75493291b518ef55e726fd, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/5fc8ba1542404371baef55275918ddc1, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/17c66d7f70d34b529a64c341b49f9443] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp, totalSize=40.3 K 2024-11-22T15:24:18,233 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 28c6b400ca75493291b518ef55e726fd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732289056848 2024-11-22T15:24:18,237 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5fc8ba1542404371baef55275918ddc1, keycount=200, bloomtype=ROW, size=14.1 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732289057192 2024-11-22T15:24:18,238 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 17c66d7f70d34b529a64c341b49f9443, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732289057925 2024-11-22T15:24:18,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742415_1591 (size=14537) 2024-11-22T15:24:18,264 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed44e89acb87ffee72f4c7902667e851#A#compaction#500 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:18,264 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/1e97f0c27b614fd6812fbfc5fdccbd2f is 50, key is test_row_0/A:col10/1732289057925/Put/seqid=0 2024-11-22T15:24:18,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742416_1592 (size=12493) 2024-11-22T15:24:18,294 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:18,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289118282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:18,294 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:18,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289118283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:18,297 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/1e97f0c27b614fd6812fbfc5fdccbd2f as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/1e97f0c27b614fd6812fbfc5fdccbd2f 2024-11-22T15:24:18,298 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:18,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289118286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:18,303 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed44e89acb87ffee72f4c7902667e851/A of ed44e89acb87ffee72f4c7902667e851 into 1e97f0c27b614fd6812fbfc5fdccbd2f(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:24:18,303 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:18,303 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851., storeName=ed44e89acb87ffee72f4c7902667e851/A, priority=13, startTime=1732289058171; duration=0sec 2024-11-22T15:24:18,303 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:24:18,303 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed44e89acb87ffee72f4c7902667e851:A 2024-11-22T15:24:18,303 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:24:18,304 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36443 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:24:18,304 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): ed44e89acb87ffee72f4c7902667e851/B is initiating minor compaction (all files) 2024-11-22T15:24:18,304 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed44e89acb87ffee72f4c7902667e851/B in TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 
2024-11-22T15:24:18,304 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/162412ee005f4050bd62bcdf49e107de, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/094a45bc21d44feca29faf5a95812ba0, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/7525b018f71f4cfab94babb1cf548031] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp, totalSize=35.6 K 2024-11-22T15:24:18,304 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:18,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289118294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:18,304 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 162412ee005f4050bd62bcdf49e107de, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732289056848 2024-11-22T15:24:18,305 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 094a45bc21d44feca29faf5a95812ba0, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732289057192 2024-11-22T15:24:18,305 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7525b018f71f4cfab94babb1cf548031, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732289057925 2024-11-22T15:24:18,317 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/70b31ad5aca14501a72592af90972a8c as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/70b31ad5aca14501a72592af90972a8c 2024-11-22T15:24:18,324 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ed44e89acb87ffee72f4c7902667e851/C of ed44e89acb87ffee72f4c7902667e851 into 70b31ad5aca14501a72592af90972a8c(size=12.0 K), total size for store is 35.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:24:18,324 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:18,324 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851., storeName=ed44e89acb87ffee72f4c7902667e851/C, priority=12, startTime=1732289057756; duration=0sec 2024-11-22T15:24:18,324 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:18,324 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed44e89acb87ffee72f4c7902667e851:C 2024-11-22T15:24:18,327 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed44e89acb87ffee72f4c7902667e851#B#compaction#501 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:18,328 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/762482e199d34201aa92bc171db22ef0 is 50, key is test_row_0/B:col10/1732289057925/Put/seqid=0 2024-11-22T15:24:18,346 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:18,346 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-22T15:24:18,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:18,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:18,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:18,346 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:18,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:18,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:18,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742417_1593 (size=12493) 2024-11-22T15:24:18,381 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/762482e199d34201aa92bc171db22ef0 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/762482e199d34201aa92bc171db22ef0 2024-11-22T15:24:18,386 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed44e89acb87ffee72f4c7902667e851/B of ed44e89acb87ffee72f4c7902667e851 into 762482e199d34201aa92bc171db22ef0(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:24:18,386 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:18,386 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851., storeName=ed44e89acb87ffee72f4c7902667e851/B, priority=13, startTime=1732289058171; duration=0sec 2024-11-22T15:24:18,386 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:18,386 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed44e89acb87ffee72f4c7902667e851:B 2024-11-22T15:24:18,399 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:18,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289118395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:18,399 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:18,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289118395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:18,409 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:18,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289118401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:18,413 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:18,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289118406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:18,452 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:18,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38240 deadline: 1732289118447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:18,453 DEBUG [Thread-2454 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4193 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851., hostname=77927f992d0b,36033,1732288915809, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T15:24:18,498 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:18,499 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-22T15:24:18,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 
2024-11-22T15:24:18,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:18,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:18,499 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:18,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:24:18,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:18,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-22T15:24:18,602 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:18,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289118600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:18,603 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:18,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289118601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:18,615 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:18,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289118611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:18,617 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:18,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289118615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:18,651 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:18,651 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-22T15:24:18,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:18,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:18,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:18,651 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:24:18,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:18,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:18,657 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/12a5cf6595de422bb29581d3a582eb82 2024-11-22T15:24:18,670 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/a37e6af2631f42128e922bb7d92206a7 is 50, key is test_row_0/B:col10/1732289058045/Put/seqid=0 2024-11-22T15:24:18,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742418_1594 (size=9757) 2024-11-22T15:24:18,734 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/a37e6af2631f42128e922bb7d92206a7 2024-11-22T15:24:18,744 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/9ee2c831366945c2bfcf24e6c8381db5 is 50, key is test_row_0/C:col10/1732289058045/Put/seqid=0 2024-11-22T15:24:18,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742419_1595 (size=9757) 2024-11-22T15:24:18,786 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/9ee2c831366945c2bfcf24e6c8381db5 2024-11-22T15:24:18,791 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/12a5cf6595de422bb29581d3a582eb82 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/12a5cf6595de422bb29581d3a582eb82 2024-11-22T15:24:18,794 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/12a5cf6595de422bb29581d3a582eb82, entries=200, sequenceid=170, filesize=14.2 K 2024-11-22T15:24:18,797 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/a37e6af2631f42128e922bb7d92206a7 as 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/a37e6af2631f42128e922bb7d92206a7 2024-11-22T15:24:18,803 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/a37e6af2631f42128e922bb7d92206a7, entries=100, sequenceid=170, filesize=9.5 K 2024-11-22T15:24:18,803 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:18,803 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-22T15:24:18,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:18,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:18,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:18,804 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:24:18,804 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/9ee2c831366945c2bfcf24e6c8381db5 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/9ee2c831366945c2bfcf24e6c8381db5 2024-11-22T15:24:18,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:18,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:18,811 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/9ee2c831366945c2bfcf24e6c8381db5, entries=100, sequenceid=170, filesize=9.5 K 2024-11-22T15:24:18,812 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for ed44e89acb87ffee72f4c7902667e851 in 638ms, sequenceid=170, compaction requested=true 2024-11-22T15:24:18,812 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:18,812 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-22T15:24:18,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed44e89acb87ffee72f4c7902667e851:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:24:18,813 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:18,813 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-22T15:24:18,813 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed44e89acb87ffee72f4c7902667e851:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:24:18,813 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:18,813 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed44e89acb87ffee72f4c7902667e851:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:24:18,813 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:24:18,814 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-22T15:24:18,814 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for 
compaction. Need 3 to initiate. 2024-11-22T15:24:18,814 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. because compaction request was cancelled 2024-11-22T15:24:18,814 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed44e89acb87ffee72f4c7902667e851:A 2024-11-22T15:24:18,814 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T15:24:18,814 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-22T15:24:18,814 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-22T15:24:18,815 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. because compaction request was cancelled 2024-11-22T15:24:18,815 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed44e89acb87ffee72f4c7902667e851:B 2024-11-22T15:24:18,815 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46200 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T15:24:18,815 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): ed44e89acb87ffee72f4c7902667e851/C is initiating minor compaction (all files) 2024-11-22T15:24:18,815 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed44e89acb87ffee72f4c7902667e851/C in TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 
2024-11-22T15:24:18,815 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/70b31ad5aca14501a72592af90972a8c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/ff8443efcd3240fcb7f8c10cbcfa6474, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/df537eec9935473ea63fd39ba7109178, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/9ee2c831366945c2bfcf24e6c8381db5] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp, totalSize=45.1 K 2024-11-22T15:24:18,816 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 70b31ad5aca14501a72592af90972a8c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732289056848 2024-11-22T15:24:18,816 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting ff8443efcd3240fcb7f8c10cbcfa6474, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732289057192 2024-11-22T15:24:18,817 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting df537eec9935473ea63fd39ba7109178, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732289057925 2024-11-22T15:24:18,817 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9ee2c831366945c2bfcf24e6c8381db5, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732289058045 2024-11-22T15:24:18,845 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed44e89acb87ffee72f4c7902667e851#C#compaction#504 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:18,845 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/a8453c47a7324983a399da7d7b12efeb is 50, key is test_row_0/C:col10/1732289058045/Put/seqid=0 2024-11-22T15:24:18,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742420_1596 (size=12527) 2024-11-22T15:24:18,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed44e89acb87ffee72f4c7902667e851 2024-11-22T15:24:18,911 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed44e89acb87ffee72f4c7902667e851 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-22T15:24:18,911 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=A 2024-11-22T15:24:18,911 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:18,911 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=B 2024-11-22T15:24:18,911 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:18,911 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=C 2024-11-22T15:24:18,911 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:18,926 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/0739e25e5d72436182332f9e6ee10438 is 50, key is test_row_0/A:col10/1732289058909/Put/seqid=0 2024-11-22T15:24:18,943 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:18,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289118934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:18,945 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:18,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289118941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:18,953 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:18,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289118942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:18,954 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:18,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289118943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:18,956 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:18,956 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-22T15:24:18,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:18,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:18,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:18,956 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:24:18,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:18,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:18,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742421_1597 (size=14541) 2024-11-22T15:24:18,958 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/0739e25e5d72436182332f9e6ee10438 2024-11-22T15:24:18,983 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/b7338a095a544fe7ba0d797a87c4c47c is 50, key is test_row_0/B:col10/1732289058909/Put/seqid=0 2024-11-22T15:24:19,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742422_1598 (size=12151) 2024-11-22T15:24:19,060 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:19,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289119045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:19,061 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:19,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289119047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:19,070 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:19,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289119061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:19,070 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:19,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289119061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:19,108 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:19,109 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-22T15:24:19,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 
2024-11-22T15:24:19,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:19,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:19,109 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:19,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:24:19,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:19,261 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:19,261 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-22T15:24:19,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:19,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 
as already flushing 2024-11-22T15:24:19,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:19,262 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:19,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:19,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:19,267 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:19,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289119261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:19,267 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:19,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289119261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:19,276 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:19,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289119271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:19,277 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:19,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289119272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:19,301 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/a8453c47a7324983a399da7d7b12efeb as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/a8453c47a7324983a399da7d7b12efeb 2024-11-22T15:24:19,305 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ed44e89acb87ffee72f4c7902667e851/C of ed44e89acb87ffee72f4c7902667e851 into a8453c47a7324983a399da7d7b12efeb(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:24:19,305 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:19,305 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851., storeName=ed44e89acb87ffee72f4c7902667e851/C, priority=12, startTime=1732289058813; duration=0sec 2024-11-22T15:24:19,305 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:19,305 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed44e89acb87ffee72f4c7902667e851:C 2024-11-22T15:24:19,414 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:19,414 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-22T15:24:19,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 
2024-11-22T15:24:19,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:19,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:19,414 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:19,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:24:19,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:24:19,422 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/b7338a095a544fe7ba0d797a87c4c47c 2024-11-22T15:24:19,444 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/9d23b1c14b1447fb846c7cb3f98cbdfa is 50, key is test_row_0/C:col10/1732289058909/Put/seqid=0 2024-11-22T15:24:19,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742423_1599 (size=12151) 2024-11-22T15:24:19,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-22T15:24:19,567 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:19,567 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-22T15:24:19,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:19,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:19,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:19,567 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:24:19,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:19,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:19,570 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:19,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289119568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:19,575 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:19,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289119569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:19,583 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:19,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289119577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:19,583 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:19,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289119579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:19,719 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:19,720 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-22T15:24:19,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:19,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:19,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:19,720 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:19,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:19,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:19,872 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:19,872 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-22T15:24:19,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:19,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:19,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:19,872 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:19,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:19,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:24:19,883 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/9d23b1c14b1447fb846c7cb3f98cbdfa 2024-11-22T15:24:19,897 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/0739e25e5d72436182332f9e6ee10438 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/0739e25e5d72436182332f9e6ee10438 2024-11-22T15:24:19,902 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/0739e25e5d72436182332f9e6ee10438, entries=200, sequenceid=197, filesize=14.2 K 2024-11-22T15:24:19,903 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/b7338a095a544fe7ba0d797a87c4c47c as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/b7338a095a544fe7ba0d797a87c4c47c 2024-11-22T15:24:19,909 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/b7338a095a544fe7ba0d797a87c4c47c, entries=150, sequenceid=197, filesize=11.9 K 2024-11-22T15:24:19,909 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/9d23b1c14b1447fb846c7cb3f98cbdfa as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/9d23b1c14b1447fb846c7cb3f98cbdfa 2024-11-22T15:24:19,914 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/9d23b1c14b1447fb846c7cb3f98cbdfa, entries=150, sequenceid=197, filesize=11.9 K 2024-11-22T15:24:19,915 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for ed44e89acb87ffee72f4c7902667e851 in 1004ms, sequenceid=197, compaction requested=true 2024-11-22T15:24:19,915 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:19,915 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed44e89acb87ffee72f4c7902667e851:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:24:19,915 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:19,915 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed44e89acb87ffee72f4c7902667e851:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:24:19,915 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:24:19,915 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:24:19,915 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed44e89acb87ffee72f4c7902667e851:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:24:19,915 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-22T15:24:19,915 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:24:19,916 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 41571 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:24:19,916 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): ed44e89acb87ffee72f4c7902667e851/A is initiating minor compaction (all files) 2024-11-22T15:24:19,916 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed44e89acb87ffee72f4c7902667e851/A in TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:19,916 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34401 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:24:19,916 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/1e97f0c27b614fd6812fbfc5fdccbd2f, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/12a5cf6595de422bb29581d3a582eb82, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/0739e25e5d72436182332f9e6ee10438] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp, totalSize=40.6 K 2024-11-22T15:24:19,916 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): ed44e89acb87ffee72f4c7902667e851/B is initiating minor compaction (all files) 2024-11-22T15:24:19,917 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed44e89acb87ffee72f4c7902667e851/B in TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 
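
The selection messages above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking", with ExploringCompactionPolicy picking all 3 files) follow the usual store-file thresholds. Below is a minimal sketch of the configuration keys involved, with values matching what the log implies; exact key names and defaults vary by HBase version, so treat this as illustrative rather than the test's actual configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Minimum number of eligible store files before a minor compaction
    // is selected ("3 eligible" in the selection messages above).
    conf.setInt("hbase.hstore.compaction.min", 3);

    // Upper bound on the number of files a single compaction may include.
    conf.setInt("hbase.hstore.compaction.max", 10);

    // Store-file count at which updates to the region are blocked until
    // compaction catches up ("16 blocking" in the selection messages above).
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);

    System.out.println("hbase.hstore.compaction.min = "
        + conf.getInt("hbase.hstore.compaction.min", -1));
  }
}
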
2024-11-22T15:24:19,917 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/762482e199d34201aa92bc171db22ef0, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/a37e6af2631f42128e922bb7d92206a7, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/b7338a095a544fe7ba0d797a87c4c47c] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp, totalSize=33.6 K 2024-11-22T15:24:19,917 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 1e97f0c27b614fd6812fbfc5fdccbd2f, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732289057925 2024-11-22T15:24:19,917 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 762482e199d34201aa92bc171db22ef0, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732289057925 2024-11-22T15:24:19,917 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting a37e6af2631f42128e922bb7d92206a7, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732289058045 2024-11-22T15:24:19,917 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 12a5cf6595de422bb29581d3a582eb82, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732289058045 2024-11-22T15:24:19,918 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting b7338a095a544fe7ba0d797a87c4c47c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732289058201 2024-11-22T15:24:19,918 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 0739e25e5d72436182332f9e6ee10438, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732289058201 2024-11-22T15:24:19,938 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed44e89acb87ffee72f4c7902667e851#B#compaction#508 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:19,939 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/22b4f1413eea4fd3961515d794a9d128 is 50, key is test_row_0/B:col10/1732289058909/Put/seqid=0 2024-11-22T15:24:19,939 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed44e89acb87ffee72f4c7902667e851#A#compaction#509 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:19,940 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/12b73b2b68f3430fb455b983a9df4f39 is 50, key is test_row_0/A:col10/1732289058909/Put/seqid=0 2024-11-22T15:24:19,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742424_1600 (size=12595) 2024-11-22T15:24:19,967 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/22b4f1413eea4fd3961515d794a9d128 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/22b4f1413eea4fd3961515d794a9d128 2024-11-22T15:24:19,973 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed44e89acb87ffee72f4c7902667e851/B of ed44e89acb87ffee72f4c7902667e851 into 22b4f1413eea4fd3961515d794a9d128(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:24:19,973 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:19,973 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851., storeName=ed44e89acb87ffee72f4c7902667e851/B, priority=13, startTime=1732289059915; duration=0sec 2024-11-22T15:24:19,973 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:24:19,973 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed44e89acb87ffee72f4c7902667e851:B 2024-11-22T15:24:19,973 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-22T15:24:19,974 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-22T15:24:19,974 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-22T15:24:19,974 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 
because compaction request was cancelled 2024-11-22T15:24:19,974 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed44e89acb87ffee72f4c7902667e851:C 2024-11-22T15:24:19,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742425_1601 (size=12595) 2024-11-22T15:24:20,006 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/12b73b2b68f3430fb455b983a9df4f39 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/12b73b2b68f3430fb455b983a9df4f39 2024-11-22T15:24:20,014 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed44e89acb87ffee72f4c7902667e851/A of ed44e89acb87ffee72f4c7902667e851 into 12b73b2b68f3430fb455b983a9df4f39(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:24:20,014 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:20,014 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851., storeName=ed44e89acb87ffee72f4c7902667e851/A, priority=13, startTime=1732289059915; duration=0sec 2024-11-22T15:24:20,014 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:20,014 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed44e89acb87ffee72f4c7902667e851:A 2024-11-22T15:24:20,025 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:20,025 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-22T15:24:20,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 
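
While the re-dispatched flush (pid=135) and the compactions run, the records below show client mutations being rejected with RegionTooBusyException ("Over memstore limit=512.0 K"): the region's memstore is above its blocking limit, so puts are bounced until the flush frees space. The following is a hedged sketch of what a writer hitting this condition might look like; the class name, value literal, and retry loop are illustrative, and whether the busy condition surfaces to the caller or is retried transparently depends on the client retry settings:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row and column family/qualifier mirror the test data in the log.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          break; // write accepted
        } catch (IOException busy) {
          // A region over its memstore blocking limit rejects the mutation
          // with RegionTooBusyException ("Over memstore limit=512.0 K" below);
          // depending on retry settings it may surface here, possibly wrapped.
          Thread.sleep(100L * attempt);
        }
      }
    }
  }
}
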
2024-11-22T15:24:20,025 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2837): Flushing ed44e89acb87ffee72f4c7902667e851 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-22T15:24:20,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=A 2024-11-22T15:24:20,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:20,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=B 2024-11-22T15:24:20,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:20,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=C 2024-11-22T15:24:20,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:20,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/2829fd5f50774e7dbb71836cfbf959ac is 50, key is test_row_0/A:col10/1732289058940/Put/seqid=0 2024-11-22T15:24:20,077 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 
as already flushing 2024-11-22T15:24:20,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed44e89acb87ffee72f4c7902667e851 2024-11-22T15:24:20,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742426_1602 (size=12151) 2024-11-22T15:24:20,088 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/2829fd5f50774e7dbb71836cfbf959ac 2024-11-22T15:24:20,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/4084b3a3a3aa48ab84eccd1824293a06 is 50, key is test_row_0/B:col10/1732289058940/Put/seqid=0 2024-11-22T15:24:20,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742427_1603 (size=12151) 2024-11-22T15:24:20,115 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/4084b3a3a3aa48ab84eccd1824293a06 2024-11-22T15:24:20,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/5b8f070dc3d444129c055b9d28c6b099 is 50, key is test_row_0/C:col10/1732289058940/Put/seqid=0 2024-11-22T15:24:20,137 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:20,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289120128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:20,145 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:20,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289120136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:20,146 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:20,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289120136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:20,146 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:20,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289120137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:20,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742428_1604 (size=12151) 2024-11-22T15:24:20,244 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:20,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289120243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:20,254 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:20,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289120246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:20,254 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:20,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289120247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:20,255 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:20,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289120247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:20,450 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:20,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289120448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:20,459 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:20,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289120455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:20,459 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:20,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289120456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:20,464 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:20,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289120458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:20,577 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/5b8f070dc3d444129c055b9d28c6b099 2024-11-22T15:24:20,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/2829fd5f50774e7dbb71836cfbf959ac as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/2829fd5f50774e7dbb71836cfbf959ac 2024-11-22T15:24:20,586 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/2829fd5f50774e7dbb71836cfbf959ac, entries=150, sequenceid=213, filesize=11.9 K 2024-11-22T15:24:20,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/4084b3a3a3aa48ab84eccd1824293a06 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/4084b3a3a3aa48ab84eccd1824293a06 2024-11-22T15:24:20,591 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/4084b3a3a3aa48ab84eccd1824293a06, entries=150, sequenceid=213, filesize=11.9 K 2024-11-22T15:24:20,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/5b8f070dc3d444129c055b9d28c6b099 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/5b8f070dc3d444129c055b9d28c6b099 2024-11-22T15:24:20,594 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/5b8f070dc3d444129c055b9d28c6b099, entries=150, sequenceid=213, filesize=11.9 K 2024-11-22T15:24:20,595 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for ed44e89acb87ffee72f4c7902667e851 in 570ms, sequenceid=213, compaction requested=true 2024-11-22T15:24:20,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2538): Flush status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:20,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:20,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=135 2024-11-22T15:24:20,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=135 2024-11-22T15:24:20,597 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=135, resume processing ppid=134 2024-11-22T15:24:20,597 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, ppid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.1670 sec 2024-11-22T15:24:20,599 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees in 3.1710 sec 2024-11-22T15:24:20,760 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed44e89acb87ffee72f4c7902667e851 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-22T15:24:20,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=A 2024-11-22T15:24:20,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:20,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=B 2024-11-22T15:24:20,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:20,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=C 2024-11-22T15:24:20,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new 
segment=null 2024-11-22T15:24:20,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed44e89acb87ffee72f4c7902667e851 2024-11-22T15:24:20,765 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/cf5ff1f8f05b4cdaacf68f3fc59ca233 is 50, key is test_row_0/A:col10/1732289060126/Put/seqid=0 2024-11-22T15:24:20,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742429_1605 (size=14541) 2024-11-22T15:24:20,788 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:20,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289120782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:20,793 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:20,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289120783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:20,795 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:20,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289120785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:20,795 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:20,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289120785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:20,897 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:20,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289120889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:20,902 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:20,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289120896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:20,902 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:20,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289120896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:20,903 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:20,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289120896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:21,106 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:21,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289121099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:21,110 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:21,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289121103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:21,110 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:21,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289121104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:21,111 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:21,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289121105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:21,189 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/cf5ff1f8f05b4cdaacf68f3fc59ca233 2024-11-22T15:24:21,195 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/e4cada4bfd9a4ff0aa71a3fe32aa3d82 is 50, key is test_row_0/B:col10/1732289060126/Put/seqid=0 2024-11-22T15:24:21,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742430_1606 (size=12151) 2024-11-22T15:24:21,213 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/e4cada4bfd9a4ff0aa71a3fe32aa3d82 2024-11-22T15:24:21,222 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/dca4f86853be4e4fb8ed06909a67aeed is 50, key is test_row_0/C:col10/1732289060126/Put/seqid=0 2024-11-22T15:24:21,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742431_1607 (size=12151) 2024-11-22T15:24:21,417 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:21,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289121409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:21,417 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:21,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289121413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:21,417 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:21,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289121413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:21,418 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:21,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289121413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:21,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-22T15:24:21,534 INFO [Thread-2464 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 134 completed 2024-11-22T15:24:21,535 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:24:21,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees 2024-11-22T15:24:21,536 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:24:21,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-22T15:24:21,537 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:24:21,537 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:24:21,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-22T15:24:21,654 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/dca4f86853be4e4fb8ed06909a67aeed 2024-11-22T15:24:21,660 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/cf5ff1f8f05b4cdaacf68f3fc59ca233 as 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/cf5ff1f8f05b4cdaacf68f3fc59ca233 2024-11-22T15:24:21,665 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/cf5ff1f8f05b4cdaacf68f3fc59ca233, entries=200, sequenceid=237, filesize=14.2 K 2024-11-22T15:24:21,665 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/e4cada4bfd9a4ff0aa71a3fe32aa3d82 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/e4cada4bfd9a4ff0aa71a3fe32aa3d82 2024-11-22T15:24:21,669 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/e4cada4bfd9a4ff0aa71a3fe32aa3d82, entries=150, sequenceid=237, filesize=11.9 K 2024-11-22T15:24:21,670 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/dca4f86853be4e4fb8ed06909a67aeed as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/dca4f86853be4e4fb8ed06909a67aeed 2024-11-22T15:24:21,675 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/dca4f86853be4e4fb8ed06909a67aeed, entries=150, sequenceid=237, filesize=11.9 K 2024-11-22T15:24:21,675 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for ed44e89acb87ffee72f4c7902667e851 in 915ms, sequenceid=237, compaction requested=true 2024-11-22T15:24:21,675 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:21,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed44e89acb87ffee72f4c7902667e851:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:24:21,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:21,676 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:24:21,676 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:24:21,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed44e89acb87ffee72f4c7902667e851:B, priority=-2147483648, current under compaction 
store size is 2 2024-11-22T15:24:21,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:21,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed44e89acb87ffee72f4c7902667e851:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:24:21,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:24:21,677 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:24:21,677 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39287 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:24:21,678 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): ed44e89acb87ffee72f4c7902667e851/B is initiating minor compaction (all files) 2024-11-22T15:24:21,678 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): ed44e89acb87ffee72f4c7902667e851/A is initiating minor compaction (all files) 2024-11-22T15:24:21,678 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed44e89acb87ffee72f4c7902667e851/B in TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:21,678 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed44e89acb87ffee72f4c7902667e851/A in TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 
2024-11-22T15:24:21,678 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/12b73b2b68f3430fb455b983a9df4f39, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/2829fd5f50774e7dbb71836cfbf959ac, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/cf5ff1f8f05b4cdaacf68f3fc59ca233] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp, totalSize=38.4 K 2024-11-22T15:24:21,678 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/22b4f1413eea4fd3961515d794a9d128, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/4084b3a3a3aa48ab84eccd1824293a06, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/e4cada4bfd9a4ff0aa71a3fe32aa3d82] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp, totalSize=36.0 K 2024-11-22T15:24:21,678 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 12b73b2b68f3430fb455b983a9df4f39, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732289058201 2024-11-22T15:24:21,679 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2829fd5f50774e7dbb71836cfbf959ac, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732289058939 2024-11-22T15:24:21,679 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 22b4f1413eea4fd3961515d794a9d128, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732289058201 2024-11-22T15:24:21,679 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting cf5ff1f8f05b4cdaacf68f3fc59ca233, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732289060123 2024-11-22T15:24:21,679 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 4084b3a3a3aa48ab84eccd1824293a06, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732289058939 2024-11-22T15:24:21,680 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting e4cada4bfd9a4ff0aa71a3fe32aa3d82, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732289060126 2024-11-22T15:24:21,688 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:21,689 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-22T15:24:21,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:21,689 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2837): Flushing ed44e89acb87ffee72f4c7902667e851 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-22T15:24:21,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=A 2024-11-22T15:24:21,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:21,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=B 2024-11-22T15:24:21,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:21,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=C 2024-11-22T15:24:21,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:21,693 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed44e89acb87ffee72f4c7902667e851#A#compaction#516 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:21,693 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/eb978b6cebeb47f29957ef518ded005a is 50, key is test_row_0/A:col10/1732289060126/Put/seqid=0 2024-11-22T15:24:21,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/067b4110d8a149c494c6be3ad1357ca8 is 50, key is test_row_0/A:col10/1732289060782/Put/seqid=0 2024-11-22T15:24:21,700 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed44e89acb87ffee72f4c7902667e851#B#compaction#518 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:21,701 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/f7cc5715e9ae4a348c84d6e589bc6636 is 50, key is test_row_0/B:col10/1732289060126/Put/seqid=0 2024-11-22T15:24:21,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742432_1608 (size=12697) 2024-11-22T15:24:21,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742433_1609 (size=12151) 2024-11-22T15:24:21,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742434_1610 (size=12697) 2024-11-22T15:24:21,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-22T15:24:21,926 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:21,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed44e89acb87ffee72f4c7902667e851 2024-11-22T15:24:21,991 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:21,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289121980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:22,000 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:22,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289121985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:22,000 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:22,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289121986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:22,000 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:22,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289121987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:22,095 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:22,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289122093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:22,107 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:22,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289122101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:22,107 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:22,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289122101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:22,108 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:22,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289122101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:22,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-22T15:24:22,142 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/eb978b6cebeb47f29957ef518ded005a as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/eb978b6cebeb47f29957ef518ded005a 2024-11-22T15:24:22,145 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/067b4110d8a149c494c6be3ad1357ca8 2024-11-22T15:24:22,147 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed44e89acb87ffee72f4c7902667e851/A of ed44e89acb87ffee72f4c7902667e851 into eb978b6cebeb47f29957ef518ded005a(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:24:22,147 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:22,147 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851., storeName=ed44e89acb87ffee72f4c7902667e851/A, priority=13, startTime=1732289061675; duration=0sec 2024-11-22T15:24:22,147 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:24:22,147 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed44e89acb87ffee72f4c7902667e851:A 2024-11-22T15:24:22,147 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T15:24:22,149 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48980 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T15:24:22,149 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): ed44e89acb87ffee72f4c7902667e851/C is initiating minor compaction (all files) 2024-11-22T15:24:22,149 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed44e89acb87ffee72f4c7902667e851/C in TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:22,150 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/a8453c47a7324983a399da7d7b12efeb, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/9d23b1c14b1447fb846c7cb3f98cbdfa, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/5b8f070dc3d444129c055b9d28c6b099, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/dca4f86853be4e4fb8ed06909a67aeed] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp, totalSize=47.8 K 2024-11-22T15:24:22,150 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting a8453c47a7324983a399da7d7b12efeb, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732289057934 2024-11-22T15:24:22,151 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9d23b1c14b1447fb846c7cb3f98cbdfa, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732289058201 2024-11-22T15:24:22,152 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5b8f070dc3d444129c055b9d28c6b099, keycount=150, bloomtype=ROW, size=11.9 K, 
encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732289058939 2024-11-22T15:24:22,152 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting dca4f86853be4e4fb8ed06909a67aeed, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732289060126 2024-11-22T15:24:22,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/3cc02ecb669e49f8802d43d00c054fe6 is 50, key is test_row_0/B:col10/1732289060782/Put/seqid=0 2024-11-22T15:24:22,162 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed44e89acb87ffee72f4c7902667e851#C#compaction#520 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:22,162 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/c4e2c80981a84e3b926369b5dbef7f17 is 50, key is test_row_0/C:col10/1732289060126/Put/seqid=0 2024-11-22T15:24:22,166 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/f7cc5715e9ae4a348c84d6e589bc6636 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/f7cc5715e9ae4a348c84d6e589bc6636 2024-11-22T15:24:22,170 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed44e89acb87ffee72f4c7902667e851/B of ed44e89acb87ffee72f4c7902667e851 into f7cc5715e9ae4a348c84d6e589bc6636(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
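The ExploringCompactionPolicy entry above selects 4 store files totalling 48980 bytes, which matches the per-file sizes listed for store C (12.2 K + 11.9 K + 11.9 K + 11.9 K ≈ 47.8 K), and PressureAwareThroughputController then caps the rewrite at the 50.00 MB/second limit it reports. Compactions here are queued automatically by CompactSplit after each flush; the same work can also be requested by hand through the Admin API. A hedged sketch (only the table and family names are taken from the log):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RequestCompaction {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Ask for a (minor) compaction of family C, comparable to the one CompactSplit queued above.
          admin.compact(TableName.valueOf("TestAcidGuarantees"), Bytes.toBytes("C"));
          // admin.majorCompact(...) would instead force every file in the store to be rewritten.
        }
      }
    }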
2024-11-22T15:24:22,170 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:22,170 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851., storeName=ed44e89acb87ffee72f4c7902667e851/B, priority=13, startTime=1732289061676; duration=0sec 2024-11-22T15:24:22,170 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:22,170 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed44e89acb87ffee72f4c7902667e851:B 2024-11-22T15:24:22,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742435_1611 (size=12151) 2024-11-22T15:24:22,199 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/3cc02ecb669e49f8802d43d00c054fe6 2024-11-22T15:24:22,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742436_1612 (size=12663) 2024-11-22T15:24:22,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/6cdd2e013d094bb0abaadb488601c1e2 is 50, key is test_row_0/C:col10/1732289060782/Put/seqid=0 2024-11-22T15:24:22,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742437_1613 (size=12151) 2024-11-22T15:24:22,288 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/6cdd2e013d094bb0abaadb488601c1e2 2024-11-22T15:24:22,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/067b4110d8a149c494c6be3ad1357ca8 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/067b4110d8a149c494c6be3ad1357ca8 2024-11-22T15:24:22,297 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/067b4110d8a149c494c6be3ad1357ca8, entries=150, sequenceid=249, filesize=11.9 K 2024-11-22T15:24:22,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/3cc02ecb669e49f8802d43d00c054fe6 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/3cc02ecb669e49f8802d43d00c054fe6 2024-11-22T15:24:22,302 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/3cc02ecb669e49f8802d43d00c054fe6, entries=150, sequenceid=249, filesize=11.9 K 2024-11-22T15:24:22,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/6cdd2e013d094bb0abaadb488601c1e2 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/6cdd2e013d094bb0abaadb488601c1e2 2024-11-22T15:24:22,308 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/6cdd2e013d094bb0abaadb488601c1e2, entries=150, sequenceid=249, filesize=11.9 K 2024-11-22T15:24:22,309 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for ed44e89acb87ffee72f4c7902667e851 in 619ms, sequenceid=249, compaction requested=false 2024-11-22T15:24:22,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2538): Flush status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:22,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 
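The flush driven by FlushRegionProcedure pid=137 finishes above: ~60.38 KB of memstore data is written out in 619 ms, producing one new HFile per column family, as the three "Added ... entries=150, sequenceid=249, filesize=11.9 K" lines for stores A, B and C show. The procedure itself is dispatched by the master, but the equivalent request can be made from a client through the Admin API. A minimal sketch, assuming a reachable cluster (only the table name comes from the log):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestFlush {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Asks the master to flush every region of the table and waits for the request to complete.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }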
2024-11-22T15:24:22,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-11-22T15:24:22,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=137 2024-11-22T15:24:22,313 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-11-22T15:24:22,313 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 774 msec 2024-11-22T15:24:22,318 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees in 780 msec 2024-11-22T15:24:22,320 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed44e89acb87ffee72f4c7902667e851 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-22T15:24:22,321 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=A 2024-11-22T15:24:22,321 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:22,321 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=B 2024-11-22T15:24:22,321 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:22,321 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=C 2024-11-22T15:24:22,321 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:22,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed44e89acb87ffee72f4c7902667e851 2024-11-22T15:24:22,330 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/dbb32d55ff004f0f8e5d809b933674fe is 50, key is test_row_0/A:col10/1732289061986/Put/seqid=0 2024-11-22T15:24:22,353 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:22,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289122344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:22,353 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:22,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289122344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:22,353 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:22,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289122345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:22,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742438_1614 (size=12301) 2024-11-22T15:24:22,360 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:22,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289122353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:22,459 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:22,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289122454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:22,459 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:22,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289122454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:22,459 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:22,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289122455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:22,468 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:22,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289122461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:22,487 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:22,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38240 deadline: 1732289122479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:22,488 DEBUG [Thread-2454 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8229 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851., hostname=77927f992d0b,36033,1732288915809, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T15:24:22,636 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/c4e2c80981a84e3b926369b5dbef7f17 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/c4e2c80981a84e3b926369b5dbef7f17 2024-11-22T15:24:22,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure 
is done pid=136 2024-11-22T15:24:22,640 INFO [Thread-2464 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 136 completed 2024-11-22T15:24:22,641 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ed44e89acb87ffee72f4c7902667e851/C of ed44e89acb87ffee72f4c7902667e851 into c4e2c80981a84e3b926369b5dbef7f17(size=12.4 K), total size for store is 24.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:24:22,641 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:22,641 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851., storeName=ed44e89acb87ffee72f4c7902667e851/C, priority=12, startTime=1732289061676; duration=0sec 2024-11-22T15:24:22,641 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:22,641 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed44e89acb87ffee72f4c7902667e851:C 2024-11-22T15:24:22,641 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:24:22,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees 2024-11-22T15:24:22,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-22T15:24:22,644 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:24:22,644 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:24:22,644 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:24:22,665 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:22,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289122660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:22,666 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:22,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289122661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:22,667 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:22,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289122661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:22,675 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:22,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289122670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:22,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-22T15:24:22,761 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/dbb32d55ff004f0f8e5d809b933674fe 2024-11-22T15:24:22,772 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/42cfd2ebbf0b470aaa06aa630eba3c00 is 50, key is test_row_0/B:col10/1732289061986/Put/seqid=0 2024-11-22T15:24:22,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742439_1615 (size=12301) 2024-11-22T15:24:22,780 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/42cfd2ebbf0b470aaa06aa630eba3c00 2024-11-22T15:24:22,796 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:22,797 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-22T15:24:22,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:22,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:22,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 
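The RpcRetryingCallerImpl entry further up ("tries=7, retries=16, started=8229 ms ago") shows the client side of the same RegionTooBusyException storm: each rejected Mutate is retried with backoff until the caller's retry budget runs out. That budget is governed by client configuration such as hbase.client.retries.number and hbase.client.pause. A minimal sketch of a write issued with those knobs set explicitly; the values are illustrative, and only the table, row and family names come from the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RetryingPut {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.client.retries.number", 16); // retry budget (illustrative value)
        conf.setLong("hbase.client.pause", 100);        // base backoff in ms (illustrative value)
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put p = new Put(Bytes.toBytes("test_row_1"));
          p.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
          // Blocks through the retry loop; RegionTooBusyException only surfaces to the caller
          // once the retry budget is exhausted.
          table.put(p);
        }
      }
    }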
2024-11-22T15:24:22,797 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:22,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:22,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:22,813 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/4b9c2ef9b8c0419585dfe870625d5f05 is 50, key is test_row_0/C:col10/1732289061986/Put/seqid=0 2024-11-22T15:24:22,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742440_1616 (size=12301) 2024-11-22T15:24:22,847 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/4b9c2ef9b8c0419585dfe870625d5f05 2024-11-22T15:24:22,853 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/dbb32d55ff004f0f8e5d809b933674fe as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/dbb32d55ff004f0f8e5d809b933674fe 2024-11-22T15:24:22,858 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/dbb32d55ff004f0f8e5d809b933674fe, entries=150, sequenceid=276, filesize=12.0 K 2024-11-22T15:24:22,858 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/42cfd2ebbf0b470aaa06aa630eba3c00 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/42cfd2ebbf0b470aaa06aa630eba3c00 2024-11-22T15:24:22,866 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/42cfd2ebbf0b470aaa06aa630eba3c00, entries=150, sequenceid=276, filesize=12.0 K 2024-11-22T15:24:22,867 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/4b9c2ef9b8c0419585dfe870625d5f05 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/4b9c2ef9b8c0419585dfe870625d5f05 2024-11-22T15:24:22,876 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/4b9c2ef9b8c0419585dfe870625d5f05, entries=150, sequenceid=276, filesize=12.0 K 2024-11-22T15:24:22,877 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for ed44e89acb87ffee72f4c7902667e851 in 557ms, sequenceid=276, compaction requested=true 2024-11-22T15:24:22,877 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:22,877 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed44e89acb87ffee72f4c7902667e851:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:24:22,877 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:22,877 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:24:22,877 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:24:22,877 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed44e89acb87ffee72f4c7902667e851:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:24:22,877 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:22,877 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed44e89acb87ffee72f4c7902667e851:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:24:22,877 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:24:22,878 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:24:22,878 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): ed44e89acb87ffee72f4c7902667e851/A is initiating minor compaction (all files) 2024-11-22T15:24:22,878 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed44e89acb87ffee72f4c7902667e851/A in TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:22,878 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:24:22,878 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): ed44e89acb87ffee72f4c7902667e851/B is initiating minor compaction (all files) 2024-11-22T15:24:22,878 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/eb978b6cebeb47f29957ef518ded005a, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/067b4110d8a149c494c6be3ad1357ca8, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/dbb32d55ff004f0f8e5d809b933674fe] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp, totalSize=36.3 K 2024-11-22T15:24:22,878 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed44e89acb87ffee72f4c7902667e851/B in TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 
2024-11-22T15:24:22,878 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/f7cc5715e9ae4a348c84d6e589bc6636, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/3cc02ecb669e49f8802d43d00c054fe6, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/42cfd2ebbf0b470aaa06aa630eba3c00] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp, totalSize=36.3 K 2024-11-22T15:24:22,878 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting eb978b6cebeb47f29957ef518ded005a, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732289060126 2024-11-22T15:24:22,879 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting f7cc5715e9ae4a348c84d6e589bc6636, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732289060126 2024-11-22T15:24:22,880 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 067b4110d8a149c494c6be3ad1357ca8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732289060771 2024-11-22T15:24:22,880 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 3cc02ecb669e49f8802d43d00c054fe6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732289060771 2024-11-22T15:24:22,880 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 42cfd2ebbf0b470aaa06aa630eba3c00, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732289061985 2024-11-22T15:24:22,880 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting dbb32d55ff004f0f8e5d809b933674fe, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732289061985 2024-11-22T15:24:22,892 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed44e89acb87ffee72f4c7902667e851#B#compaction#525 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:22,894 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/a9a26e26cde84fd7a0d240a85d48c98d is 50, key is test_row_0/B:col10/1732289061986/Put/seqid=0 2024-11-22T15:24:22,895 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed44e89acb87ffee72f4c7902667e851#A#compaction#526 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:22,895 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/693c4767a7014770afcac4a6f137acb4 is 50, key is test_row_0/A:col10/1732289061986/Put/seqid=0 2024-11-22T15:24:22,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-22T15:24:22,950 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:22,951 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-22T15:24:22,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:22,951 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2837): Flushing ed44e89acb87ffee72f4c7902667e851 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-22T15:24:22,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=A 2024-11-22T15:24:22,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:22,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=B 2024-11-22T15:24:22,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:22,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=C 2024-11-22T15:24:22,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:22,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742441_1617 (size=12949) 2024-11-22T15:24:22,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742442_1618 (size=12949) 2024-11-22T15:24:22,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/d3caa6275b6b4cd8a82fe5c5e9b367f7 is 50, key is 
test_row_0/A:col10/1732289062324/Put/seqid=0 2024-11-22T15:24:22,968 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/693c4767a7014770afcac4a6f137acb4 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/693c4767a7014770afcac4a6f137acb4 2024-11-22T15:24:22,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed44e89acb87ffee72f4c7902667e851 2024-11-22T15:24:22,972 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:22,975 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed44e89acb87ffee72f4c7902667e851/A of ed44e89acb87ffee72f4c7902667e851 into 693c4767a7014770afcac4a6f137acb4(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:24:22,975 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:22,975 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851., storeName=ed44e89acb87ffee72f4c7902667e851/A, priority=13, startTime=1732289062877; duration=0sec 2024-11-22T15:24:22,975 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:24:22,975 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed44e89acb87ffee72f4c7902667e851:A 2024-11-22T15:24:22,976 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:24:22,978 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37115 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:24:22,978 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): ed44e89acb87ffee72f4c7902667e851/C is initiating minor compaction (all files) 2024-11-22T15:24:22,978 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed44e89acb87ffee72f4c7902667e851/C in TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 
2024-11-22T15:24:22,978 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/c4e2c80981a84e3b926369b5dbef7f17, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/6cdd2e013d094bb0abaadb488601c1e2, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/4b9c2ef9b8c0419585dfe870625d5f05] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp, totalSize=36.2 K 2024-11-22T15:24:22,978 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting c4e2c80981a84e3b926369b5dbef7f17, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732289060126 2024-11-22T15:24:22,979 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6cdd2e013d094bb0abaadb488601c1e2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732289060771 2024-11-22T15:24:22,979 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4b9c2ef9b8c0419585dfe870625d5f05, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732289061985 2024-11-22T15:24:22,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742443_1619 (size=12301) 2024-11-22T15:24:22,993 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-22T15:24:22,993 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/d3caa6275b6b4cd8a82fe5c5e9b367f7 2024-11-22T15:24:23,005 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed44e89acb87ffee72f4c7902667e851#C#compaction#528 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:23,006 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/91446375bc5b48fbbad794c8a7ddee91 is 50, key is test_row_0/C:col10/1732289061986/Put/seqid=0 2024-11-22T15:24:23,045 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:23,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289123030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:23,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/8746eb853b07408d8b49547bfba4c0a0 is 50, key is test_row_0/B:col10/1732289062324/Put/seqid=0 2024-11-22T15:24:23,046 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:23,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289123034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:23,048 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:23,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289123035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:23,057 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:23,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289123045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:23,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742444_1620 (size=12915) 2024-11-22T15:24:23,075 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/91446375bc5b48fbbad794c8a7ddee91 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/91446375bc5b48fbbad794c8a7ddee91 2024-11-22T15:24:23,080 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed44e89acb87ffee72f4c7902667e851/C of ed44e89acb87ffee72f4c7902667e851 into 91446375bc5b48fbbad794c8a7ddee91(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:24:23,080 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:23,080 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851., storeName=ed44e89acb87ffee72f4c7902667e851/C, priority=13, startTime=1732289062877; duration=0sec 2024-11-22T15:24:23,080 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:23,080 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed44e89acb87ffee72f4c7902667e851:C 2024-11-22T15:24:23,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742445_1621 (size=12301) 2024-11-22T15:24:23,105 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/8746eb853b07408d8b49547bfba4c0a0 2024-11-22T15:24:23,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/13bc304af6fb46b182d4cffab874fb37 is 50, key is test_row_0/C:col10/1732289062324/Put/seqid=0 2024-11-22T15:24:23,153 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:23,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289123146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:23,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742446_1622 (size=12301) 2024-11-22T15:24:23,157 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:23,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289123150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:23,158 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:23,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289123150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:23,168 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:23,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289123163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:23,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-22T15:24:23,358 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:23,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289123354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:23,360 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/a9a26e26cde84fd7a0d240a85d48c98d as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/a9a26e26cde84fd7a0d240a85d48c98d 2024-11-22T15:24:23,366 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed44e89acb87ffee72f4c7902667e851/B of ed44e89acb87ffee72f4c7902667e851 into a9a26e26cde84fd7a0d240a85d48c98d(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:24:23,366 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:23,366 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851., storeName=ed44e89acb87ffee72f4c7902667e851/B, priority=13, startTime=1732289062877; duration=0sec 2024-11-22T15:24:23,366 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:23,366 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed44e89acb87ffee72f4c7902667e851:B 2024-11-22T15:24:23,368 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:23,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289123360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:23,368 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:23,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289123365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:23,374 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:23,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289123370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:23,554 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/13bc304af6fb46b182d4cffab874fb37 2024-11-22T15:24:23,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/d3caa6275b6b4cd8a82fe5c5e9b367f7 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/d3caa6275b6b4cd8a82fe5c5e9b367f7 2024-11-22T15:24:23,562 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/d3caa6275b6b4cd8a82fe5c5e9b367f7, entries=150, sequenceid=288, filesize=12.0 K 2024-11-22T15:24:23,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/8746eb853b07408d8b49547bfba4c0a0 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/8746eb853b07408d8b49547bfba4c0a0 2024-11-22T15:24:23,569 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/8746eb853b07408d8b49547bfba4c0a0, entries=150, sequenceid=288, filesize=12.0 K 2024-11-22T15:24:23,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/13bc304af6fb46b182d4cffab874fb37 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/13bc304af6fb46b182d4cffab874fb37 2024-11-22T15:24:23,576 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/13bc304af6fb46b182d4cffab874fb37, entries=150, sequenceid=288, filesize=12.0 K 2024-11-22T15:24:23,577 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for ed44e89acb87ffee72f4c7902667e851 in 625ms, sequenceid=288, compaction requested=false 2024-11-22T15:24:23,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2538): Flush status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:23,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 
2024-11-22T15:24:23,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-11-22T15:24:23,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=139 2024-11-22T15:24:23,579 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-11-22T15:24:23,579 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 934 msec 2024-11-22T15:24:23,580 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees in 938 msec 2024-11-22T15:24:23,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed44e89acb87ffee72f4c7902667e851 2024-11-22T15:24:23,667 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed44e89acb87ffee72f4c7902667e851 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-22T15:24:23,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=A 2024-11-22T15:24:23,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:23,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=B 2024-11-22T15:24:23,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:23,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=C 2024-11-22T15:24:23,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:23,684 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/aa92b8c8a0cf44f694dd9c6b36c675b9 is 50, key is test_row_0/A:col10/1732289063031/Put/seqid=0 2024-11-22T15:24:23,687 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:23,687 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:23,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289123679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:23,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289123678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:23,687 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:23,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289123686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:23,688 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:23,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289123687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:23,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742447_1623 (size=14741) 2024-11-22T15:24:23,715 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/aa92b8c8a0cf44f694dd9c6b36c675b9 2024-11-22T15:24:23,732 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/131414e19f5d4a4a86b7bd98d2d0d0e3 is 50, key is test_row_0/B:col10/1732289063031/Put/seqid=0 2024-11-22T15:24:23,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-22T15:24:23,746 INFO [Thread-2464 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 138 completed 2024-11-22T15:24:23,749 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:24:23,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees 2024-11-22T15:24:23,750 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:24:23,751 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:24:23,751 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:24:23,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 
2024-11-22T15:24:23,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742448_1624 (size=12301) 2024-11-22T15:24:23,791 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:23,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289123788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:23,791 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:23,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289123789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:23,792 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:23,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289123789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:23,792 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:23,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289123789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:23,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-22T15:24:23,902 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:23,903 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-22T15:24:23,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:23,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:23,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:23,903 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:23,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:23,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:23,997 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:23,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289123992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:23,998 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:23,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289123993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:24,001 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:24,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289123994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:24,002 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:24,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289123995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:24,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-22T15:24:24,055 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:24,055 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-22T15:24:24,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:24,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:24,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:24,056 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:24,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:24,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:24,161 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/131414e19f5d4a4a86b7bd98d2d0d0e3 2024-11-22T15:24:24,177 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/54eaf4373b764e40bdd2c83d30f0f4d4 is 50, key is test_row_0/C:col10/1732289063031/Put/seqid=0 2024-11-22T15:24:24,207 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:24,208 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-22T15:24:24,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:24,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:24,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:24,208 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:24,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:24,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:24,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742449_1625 (size=12301) 2024-11-22T15:24:24,211 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/54eaf4373b764e40bdd2c83d30f0f4d4 2024-11-22T15:24:24,214 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/aa92b8c8a0cf44f694dd9c6b36c675b9 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/aa92b8c8a0cf44f694dd9c6b36c675b9 2024-11-22T15:24:24,217 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/aa92b8c8a0cf44f694dd9c6b36c675b9, entries=200, sequenceid=317, filesize=14.4 K 2024-11-22T15:24:24,217 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/131414e19f5d4a4a86b7bd98d2d0d0e3 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/131414e19f5d4a4a86b7bd98d2d0d0e3 2024-11-22T15:24:24,220 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/131414e19f5d4a4a86b7bd98d2d0d0e3, entries=150, sequenceid=317, filesize=12.0 K 2024-11-22T15:24:24,221 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/54eaf4373b764e40bdd2c83d30f0f4d4 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/54eaf4373b764e40bdd2c83d30f0f4d4 2024-11-22T15:24:24,225 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/54eaf4373b764e40bdd2c83d30f0f4d4, entries=150, sequenceid=317, filesize=12.0 K 2024-11-22T15:24:24,226 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for ed44e89acb87ffee72f4c7902667e851 in 558ms, sequenceid=317, compaction requested=true 2024-11-22T15:24:24,226 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:24,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed44e89acb87ffee72f4c7902667e851:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:24:24,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:24,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed44e89acb87ffee72f4c7902667e851:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:24:24,226 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:24:24,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:24,226 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:24:24,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed44e89acb87ffee72f4c7902667e851:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:24:24,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:24:24,227 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39991 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:24:24,227 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:24:24,227 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): ed44e89acb87ffee72f4c7902667e851/B is initiating minor compaction (all files) 2024-11-22T15:24:24,227 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): ed44e89acb87ffee72f4c7902667e851/A is initiating minor compaction (all files) 2024-11-22T15:24:24,227 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed44e89acb87ffee72f4c7902667e851/B in TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 
2024-11-22T15:24:24,227 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed44e89acb87ffee72f4c7902667e851/A in TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:24,227 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/693c4767a7014770afcac4a6f137acb4, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/d3caa6275b6b4cd8a82fe5c5e9b367f7, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/aa92b8c8a0cf44f694dd9c6b36c675b9] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp, totalSize=39.1 K 2024-11-22T15:24:24,227 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/a9a26e26cde84fd7a0d240a85d48c98d, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/8746eb853b07408d8b49547bfba4c0a0, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/131414e19f5d4a4a86b7bd98d2d0d0e3] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp, totalSize=36.7 K 2024-11-22T15:24:24,227 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 693c4767a7014770afcac4a6f137acb4, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732289061985 2024-11-22T15:24:24,227 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting a9a26e26cde84fd7a0d240a85d48c98d, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732289061985 2024-11-22T15:24:24,227 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting d3caa6275b6b4cd8a82fe5c5e9b367f7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732289062324 2024-11-22T15:24:24,228 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 8746eb853b07408d8b49547bfba4c0a0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732289062324 2024-11-22T15:24:24,228 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting aa92b8c8a0cf44f694dd9c6b36c675b9, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1732289063030 2024-11-22T15:24:24,228 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 131414e19f5d4a4a86b7bd98d2d0d0e3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1732289063031 
2024-11-22T15:24:24,240 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed44e89acb87ffee72f4c7902667e851#B#compaction#534 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:24,240 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/8a0ceca5ce7d49f7bc60ebf71cb4bc95 is 50, key is test_row_0/B:col10/1732289063031/Put/seqid=0 2024-11-22T15:24:24,248 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed44e89acb87ffee72f4c7902667e851#A#compaction#535 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:24,248 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/f423b6dc354f47bd94d1790e01b8439e is 50, key is test_row_0/A:col10/1732289063031/Put/seqid=0 2024-11-22T15:24:24,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742450_1626 (size=13051) 2024-11-22T15:24:24,297 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/8a0ceca5ce7d49f7bc60ebf71cb4bc95 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/8a0ceca5ce7d49f7bc60ebf71cb4bc95 2024-11-22T15:24:24,306 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed44e89acb87ffee72f4c7902667e851/B of ed44e89acb87ffee72f4c7902667e851 into 8a0ceca5ce7d49f7bc60ebf71cb4bc95(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:24:24,306 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:24,306 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851., storeName=ed44e89acb87ffee72f4c7902667e851/B, priority=13, startTime=1732289064226; duration=0sec 2024-11-22T15:24:24,306 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:24:24,306 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed44e89acb87ffee72f4c7902667e851:B 2024-11-22T15:24:24,307 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:24:24,309 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed44e89acb87ffee72f4c7902667e851 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-22T15:24:24,309 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37517 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:24:24,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed44e89acb87ffee72f4c7902667e851 2024-11-22T15:24:24,309 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): ed44e89acb87ffee72f4c7902667e851/C is initiating minor compaction (all files) 2024-11-22T15:24:24,309 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed44e89acb87ffee72f4c7902667e851/C in TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 
2024-11-22T15:24:24,309 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/91446375bc5b48fbbad794c8a7ddee91, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/13bc304af6fb46b182d4cffab874fb37, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/54eaf4373b764e40bdd2c83d30f0f4d4] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp, totalSize=36.6 K 2024-11-22T15:24:24,310 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 91446375bc5b48fbbad794c8a7ddee91, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732289061985 2024-11-22T15:24:24,310 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 13bc304af6fb46b182d4cffab874fb37, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732289062324 2024-11-22T15:24:24,311 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 54eaf4373b764e40bdd2c83d30f0f4d4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1732289063031 2024-11-22T15:24:24,311 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=A 2024-11-22T15:24:24,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:24,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=B 2024-11-22T15:24:24,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:24,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=C 2024-11-22T15:24:24,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:24,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742451_1627 (size=13051) 2024-11-22T15:24:24,326 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/f423b6dc354f47bd94d1790e01b8439e as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/f423b6dc354f47bd94d1790e01b8439e 2024-11-22T15:24:24,330 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed44e89acb87ffee72f4c7902667e851/A of ed44e89acb87ffee72f4c7902667e851 into f423b6dc354f47bd94d1790e01b8439e(size=12.7 K), total size for store is 12.7 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:24:24,330 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:24,330 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851., storeName=ed44e89acb87ffee72f4c7902667e851/A, priority=13, startTime=1732289064226; duration=0sec 2024-11-22T15:24:24,331 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:24,331 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed44e89acb87ffee72f4c7902667e851:A 2024-11-22T15:24:24,336 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed44e89acb87ffee72f4c7902667e851#C#compaction#536 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:24,336 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/1b2c0e8321b34c93aa0f56bcd22ad983 is 50, key is test_row_0/C:col10/1732289063031/Put/seqid=0 2024-11-22T15:24:24,344 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/afba40a0b51e46d49f0d820f69675555 is 50, key is test_row_0/A:col10/1732289064309/Put/seqid=0 2024-11-22T15:24:24,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-22T15:24:24,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742452_1628 (size=13017) 2024-11-22T15:24:24,360 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:24,360 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-22T15:24:24,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:24,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:24,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 
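The ERROR and WARN records that follow show two separate things: the master's FlushRegionCallable for pid=141 being rejected because a flush of this region is already in progress (the master simply re-dispatches it shortly afterwards), and client mutations being rejected with RegionTooBusyException because the region's memstore has exceeded its 512.0 K blocking limit. In HBase that blocking limit is the per-region memstore flush size multiplied by the block multiplier. A minimal sketch of the two standard settings involved is below; the concrete values are assumptions chosen only because they would reproduce the 512 K figure, since the test's actual configuration is not visible in this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Blocking limit = flush size * block multiplier. A 128 KB flush size with the
        // default multiplier of 4 would give the 512 K limit reported in this log;
        // these values are illustrative assumptions, not read from the test's config.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("Blocking memstore limit per region: " + blockingLimit + " bytes");
      }
    }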
2024-11-22T15:24:24,361 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:24,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:24,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:24,367 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/1b2c0e8321b34c93aa0f56bcd22ad983 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/1b2c0e8321b34c93aa0f56bcd22ad983 2024-11-22T15:24:24,372 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed44e89acb87ffee72f4c7902667e851/C of ed44e89acb87ffee72f4c7902667e851 into 1b2c0e8321b34c93aa0f56bcd22ad983(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:24:24,372 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:24,372 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851., storeName=ed44e89acb87ffee72f4c7902667e851/C, priority=13, startTime=1732289064226; duration=0sec 2024-11-22T15:24:24,372 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:24,372 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed44e89acb87ffee72f4c7902667e851:C 2024-11-22T15:24:24,380 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:24,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289124367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:24,381 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:24,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289124373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:24,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742453_1629 (size=14741) 2024-11-22T15:24:24,385 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/afba40a0b51e46d49f0d820f69675555 2024-11-22T15:24:24,387 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:24,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289124380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:24,388 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:24,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289124381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:24,400 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/a8f3ced3bda44c4faa16d896ba19b267 is 50, key is test_row_0/B:col10/1732289064309/Put/seqid=0 2024-11-22T15:24:24,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742454_1630 (size=12301) 2024-11-22T15:24:24,445 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/a8f3ced3bda44c4faa16d896ba19b267 2024-11-22T15:24:24,466 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/56a95d91224246aca8eab940acac3ff7 is 50, key is test_row_0/C:col10/1732289064309/Put/seqid=0 2024-11-22T15:24:24,485 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:24,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289124482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:24,494 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:24,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289124483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:24,496 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:24,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289124489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:24,496 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:24,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289124489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:24,512 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:24,513 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-22T15:24:24,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:24,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:24,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:24,513 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
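The repeated RegionTooBusyException records above are back-pressure: the server refuses new writes to ed44e89acb87ffee72f4c7902667e851 until the in-flight flush brings the memstore back under the blocking limit. The stock HBase client normally retries these internally (after exhausting retries they may surface wrapped rather than directly), so the bursts here indicate throttling under the test's write pressure rather than lost updates. As a hedged, application-level sketch only, a retry-with-backoff loop around a put shaped like the rows in this log might look as follows (it assumes the exception reaches the caller directly; the cell value, retry counts, and class name are placeholders):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetrySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          // Row, family and qualifier mirror the cells seen in the log (test_row_0 / A:col10).
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;
          for (int attempt = 1; ; attempt++) {
            try {
              table.put(put);            // may be rejected while the region is over its memstore limit
              break;
            } catch (RegionTooBusyException e) {
              if (attempt >= 5) throw e; // give up after a few attempts
              Thread.sleep(backoffMs);   // simple exponential backoff before retrying
              backoffMs *= 2;
            }
          }
        }
      }
    }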
2024-11-22T15:24:24,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:24,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:24,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742455_1631 (size=12301) 2024-11-22T15:24:24,518 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/56a95d91224246aca8eab940acac3ff7 2024-11-22T15:24:24,523 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/afba40a0b51e46d49f0d820f69675555 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/afba40a0b51e46d49f0d820f69675555 2024-11-22T15:24:24,527 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/afba40a0b51e46d49f0d820f69675555, entries=200, sequenceid=330, filesize=14.4 K 2024-11-22T15:24:24,528 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/a8f3ced3bda44c4faa16d896ba19b267 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/a8f3ced3bda44c4faa16d896ba19b267 2024-11-22T15:24:24,531 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/a8f3ced3bda44c4faa16d896ba19b267, entries=150, sequenceid=330, filesize=12.0 K 2024-11-22T15:24:24,531 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/56a95d91224246aca8eab940acac3ff7 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/56a95d91224246aca8eab940acac3ff7 2024-11-22T15:24:24,534 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/56a95d91224246aca8eab940acac3ff7, entries=150, sequenceid=330, filesize=12.0 K 2024-11-22T15:24:24,535 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for ed44e89acb87ffee72f4c7902667e851 in 226ms, sequenceid=330, compaction requested=false 2024-11-22T15:24:24,535 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:24,665 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:24,665 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-22T15:24:24,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:24,665 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2837): Flushing ed44e89acb87ffee72f4c7902667e851 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-22T15:24:24,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=A 2024-11-22T15:24:24,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:24,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=B 2024-11-22T15:24:24,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:24,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=C 2024-11-22T15:24:24,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:24,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/250c839f44e749a09716cfbac0f83096 is 50, key is test_row_0/A:col10/1732289064379/Put/seqid=0 2024-11-22T15:24:24,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742456_1632 (size=12301) 2024-11-22T15:24:24,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed44e89acb87ffee72f4c7902667e851 2024-11-22T15:24:24,692 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:24,710 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:24,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289124703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:24,717 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:24,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289124709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:24,717 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:24,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289124710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:24,717 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:24,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289124710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:24,813 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:24,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289124812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:24,823 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:24,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289124818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:24,823 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:24,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289124818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:24,824 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:24,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289124819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:24,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-22T15:24:25,021 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:25,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289125014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:25,034 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:25,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289125026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:25,035 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:25,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289125026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:25,035 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:25,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289125026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:25,086 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=357 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/250c839f44e749a09716cfbac0f83096 2024-11-22T15:24:25,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/5240c96346244b5fa7edd7fb8c3abe37 is 50, key is test_row_0/B:col10/1732289064379/Put/seqid=0 2024-11-22T15:24:25,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742457_1633 (size=12301) 2024-11-22T15:24:25,118 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=357 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/5240c96346244b5fa7edd7fb8c3abe37 2024-11-22T15:24:25,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/20e31522cf07400cb871e40864ac4e28 is 50, key is test_row_0/C:col10/1732289064379/Put/seqid=0 2024-11-22T15:24:25,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742458_1634 (size=12301) 2024-11-22T15:24:25,144 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=357 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/20e31522cf07400cb871e40864ac4e28 2024-11-22T15:24:25,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/250c839f44e749a09716cfbac0f83096 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/250c839f44e749a09716cfbac0f83096 2024-11-22T15:24:25,151 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/250c839f44e749a09716cfbac0f83096, entries=150, sequenceid=357, filesize=12.0 K 2024-11-22T15:24:25,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/5240c96346244b5fa7edd7fb8c3abe37 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/5240c96346244b5fa7edd7fb8c3abe37 2024-11-22T15:24:25,155 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/5240c96346244b5fa7edd7fb8c3abe37, entries=150, sequenceid=357, filesize=12.0 K 2024-11-22T15:24:25,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/20e31522cf07400cb871e40864ac4e28 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/20e31522cf07400cb871e40864ac4e28 2024-11-22T15:24:25,164 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/20e31522cf07400cb871e40864ac4e28, entries=150, sequenceid=357, filesize=12.0 K 2024-11-22T15:24:25,164 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for ed44e89acb87ffee72f4c7902667e851 in 499ms, sequenceid=357, compaction requested=true 2024-11-22T15:24:25,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2538): Flush status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:25,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 
2024-11-22T15:24:25,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-11-22T15:24:25,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=141 2024-11-22T15:24:25,167 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=141, resume processing ppid=140 2024-11-22T15:24:25,167 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, ppid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4150 sec 2024-11-22T15:24:25,168 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees in 1.4180 sec 2024-11-22T15:24:25,330 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed44e89acb87ffee72f4c7902667e851 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-22T15:24:25,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=A 2024-11-22T15:24:25,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:25,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed44e89acb87ffee72f4c7902667e851 2024-11-22T15:24:25,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=B 2024-11-22T15:24:25,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:25,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=C 2024-11-22T15:24:25,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:25,363 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/0719449ff7a54984b655f4cea7699a5d is 50, key is test_row_0/A:col10/1732289064709/Put/seqid=0 2024-11-22T15:24:25,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742459_1635 (size=12301) 2024-11-22T15:24:25,368 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=369 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/0719449ff7a54984b655f4cea7699a5d 2024-11-22T15:24:25,377 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/94e9f3a7861746b6816ddd6841c653d8 is 50, key is test_row_0/B:col10/1732289064709/Put/seqid=0 2024-11-22T15:24:25,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to 
blk_1073742460_1636 (size=12301) 2024-11-22T15:24:25,404 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=369 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/94e9f3a7861746b6816ddd6841c653d8 2024-11-22T15:24:25,414 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/3e8e1d3296b34f79b671aacce28f677c is 50, key is test_row_0/C:col10/1732289064709/Put/seqid=0 2024-11-22T15:24:25,425 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:25,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289125411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:25,426 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:25,426 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:25,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289125414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:25,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289125415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:25,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742461_1637 (size=12301) 2024-11-22T15:24:25,433 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=369 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/3e8e1d3296b34f79b671aacce28f677c 2024-11-22T15:24:25,435 WARN 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:25,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289125425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:25,440 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/0719449ff7a54984b655f4cea7699a5d as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/0719449ff7a54984b655f4cea7699a5d 2024-11-22T15:24:25,444 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/0719449ff7a54984b655f4cea7699a5d, entries=150, sequenceid=369, filesize=12.0 K 2024-11-22T15:24:25,445 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/94e9f3a7861746b6816ddd6841c653d8 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/94e9f3a7861746b6816ddd6841c653d8 2024-11-22T15:24:25,451 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/94e9f3a7861746b6816ddd6841c653d8, entries=150, sequenceid=369, filesize=12.0 K 2024-11-22T15:24:25,452 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/3e8e1d3296b34f79b671aacce28f677c as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/3e8e1d3296b34f79b671aacce28f677c 2024-11-22T15:24:25,457 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/3e8e1d3296b34f79b671aacce28f677c, entries=150, sequenceid=369, filesize=12.0 K 2024-11-22T15:24:25,458 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for ed44e89acb87ffee72f4c7902667e851 in 129ms, sequenceid=369, compaction requested=true 2024-11-22T15:24:25,458 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:25,458 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T15:24:25,460 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed44e89acb87ffee72f4c7902667e851:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:24:25,460 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52394 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T15:24:25,460 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): ed44e89acb87ffee72f4c7902667e851/A is initiating minor compaction (all files) 2024-11-22T15:24:25,460 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed44e89acb87ffee72f4c7902667e851/A in TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 
2024-11-22T15:24:25,461 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/f423b6dc354f47bd94d1790e01b8439e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/afba40a0b51e46d49f0d820f69675555, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/250c839f44e749a09716cfbac0f83096, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/0719449ff7a54984b655f4cea7699a5d] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp, totalSize=51.2 K 2024-11-22T15:24:25,461 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting f423b6dc354f47bd94d1790e01b8439e, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1732289063031 2024-11-22T15:24:25,461 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting afba40a0b51e46d49f0d820f69675555, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732289063675 2024-11-22T15:24:25,461 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 250c839f44e749a09716cfbac0f83096, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=357, earliestPutTs=1732289064359 2024-11-22T15:24:25,461 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0719449ff7a54984b655f4cea7699a5d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=369, earliestPutTs=1732289064707 2024-11-22T15:24:25,466 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:25,467 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T15:24:25,467 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed44e89acb87ffee72f4c7902667e851:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:24:25,467 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:25,467 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed44e89acb87ffee72f4c7902667e851:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:24:25,467 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:24:25,468 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 
files of size 49954 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T15:24:25,468 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): ed44e89acb87ffee72f4c7902667e851/B is initiating minor compaction (all files) 2024-11-22T15:24:25,468 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed44e89acb87ffee72f4c7902667e851/B in TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:25,468 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/8a0ceca5ce7d49f7bc60ebf71cb4bc95, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/a8f3ced3bda44c4faa16d896ba19b267, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/5240c96346244b5fa7edd7fb8c3abe37, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/94e9f3a7861746b6816ddd6841c653d8] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp, totalSize=48.8 K 2024-11-22T15:24:25,468 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 8a0ceca5ce7d49f7bc60ebf71cb4bc95, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1732289063031 2024-11-22T15:24:25,469 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting a8f3ced3bda44c4faa16d896ba19b267, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732289063675 2024-11-22T15:24:25,469 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 5240c96346244b5fa7edd7fb8c3abe37, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=357, earliestPutTs=1732289064359 2024-11-22T15:24:25,469 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 94e9f3a7861746b6816ddd6841c653d8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=369, earliestPutTs=1732289064707 2024-11-22T15:24:25,477 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed44e89acb87ffee72f4c7902667e851#A#compaction#546 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:25,477 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/822ee1c78e2244228d88de8de0f4481c is 50, key is test_row_0/A:col10/1732289064709/Put/seqid=0 2024-11-22T15:24:25,490 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed44e89acb87ffee72f4c7902667e851#B#compaction#547 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:25,491 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/bbadeaf50ab44844a4a0a5899f9b843b is 50, key is test_row_0/B:col10/1732289064709/Put/seqid=0 2024-11-22T15:24:25,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742462_1638 (size=13187) 2024-11-22T15:24:25,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742463_1639 (size=13187) 2024-11-22T15:24:25,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed44e89acb87ffee72f4c7902667e851 2024-11-22T15:24:25,535 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed44e89acb87ffee72f4c7902667e851 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-22T15:24:25,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=A 2024-11-22T15:24:25,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:25,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=B 2024-11-22T15:24:25,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:25,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=C 2024-11-22T15:24:25,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:25,538 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/bbadeaf50ab44844a4a0a5899f9b843b as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/bbadeaf50ab44844a4a0a5899f9b843b 2024-11-22T15:24:25,541 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/9fd60e78a3244509989d3853d6ec97d7 is 50, key is 
test_row_0/A:col10/1732289065423/Put/seqid=0 2024-11-22T15:24:25,548 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ed44e89acb87ffee72f4c7902667e851/B of ed44e89acb87ffee72f4c7902667e851 into bbadeaf50ab44844a4a0a5899f9b843b(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:24:25,548 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:25,548 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851., storeName=ed44e89acb87ffee72f4c7902667e851/B, priority=12, startTime=1732289065467; duration=0sec 2024-11-22T15:24:25,549 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:24:25,549 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed44e89acb87ffee72f4c7902667e851:B 2024-11-22T15:24:25,549 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T15:24:25,551 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49920 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T15:24:25,552 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): ed44e89acb87ffee72f4c7902667e851/C is initiating minor compaction (all files) 2024-11-22T15:24:25,552 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed44e89acb87ffee72f4c7902667e851/C in TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 
2024-11-22T15:24:25,552 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/1b2c0e8321b34c93aa0f56bcd22ad983, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/56a95d91224246aca8eab940acac3ff7, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/20e31522cf07400cb871e40864ac4e28, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/3e8e1d3296b34f79b671aacce28f677c] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp, totalSize=48.8 K 2024-11-22T15:24:25,552 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 1b2c0e8321b34c93aa0f56bcd22ad983, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1732289063031 2024-11-22T15:24:25,552 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 56a95d91224246aca8eab940acac3ff7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732289063675 2024-11-22T15:24:25,553 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 20e31522cf07400cb871e40864ac4e28, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=357, earliestPutTs=1732289064359 2024-11-22T15:24:25,553 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 3e8e1d3296b34f79b671aacce28f677c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=369, earliestPutTs=1732289064707 2024-11-22T15:24:25,564 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:25,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289125551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:25,564 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:25,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289125552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:25,568 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:25,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289125556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:25,568 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:25,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289125562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:25,577 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed44e89acb87ffee72f4c7902667e851#C#compaction#549 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:25,577 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/7bd6f68435fd44b6b12753d15884c77f is 50, key is test_row_0/C:col10/1732289064709/Put/seqid=0 2024-11-22T15:24:25,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742464_1640 (size=14741) 2024-11-22T15:24:25,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742465_1641 (size=13153) 2024-11-22T15:24:25,622 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/7bd6f68435fd44b6b12753d15884c77f as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/7bd6f68435fd44b6b12753d15884c77f 2024-11-22T15:24:25,630 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ed44e89acb87ffee72f4c7902667e851/C of ed44e89acb87ffee72f4c7902667e851 into 7bd6f68435fd44b6b12753d15884c77f(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:24:25,630 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:25,631 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851., storeName=ed44e89acb87ffee72f4c7902667e851/C, priority=12, startTime=1732289065467; duration=0sec 2024-11-22T15:24:25,631 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:25,631 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed44e89acb87ffee72f4c7902667e851:C 2024-11-22T15:24:25,665 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:25,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289125665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:25,671 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:25,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289125666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:25,675 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:25,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289125669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:25,676 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:25,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289125670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:25,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-22T15:24:25,855 INFO [Thread-2464 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 140 completed 2024-11-22T15:24:25,856 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:24:25,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees 2024-11-22T15:24:25,857 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:24:25,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-22T15:24:25,858 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:24:25,858 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:24:25,871 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:25,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289125867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:25,880 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:25,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289125873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:25,880 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:25,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289125876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:25,880 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:25,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289125876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:25,903 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/822ee1c78e2244228d88de8de0f4481c as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/822ee1c78e2244228d88de8de0f4481c 2024-11-22T15:24:25,908 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ed44e89acb87ffee72f4c7902667e851/A of ed44e89acb87ffee72f4c7902667e851 into 822ee1c78e2244228d88de8de0f4481c(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
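The repeated "Over memstore limit=512.0 K" rejections above come from HRegion.checkResources, which refuses new mutations once a region's memstore has grown past its blocking threshold, i.e. the configured per-region flush size multiplied by the block multiplier, until flushes catch up. A minimal sketch, assuming the standard Hadoop/HBase configuration API and purely illustrative values (this log only shows the resulting 512.0 K limit, not the sizes the test actually configures), of the two settings that determine that threshold:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical values chosen only to reproduce the 512 K figure reported above.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // per-region flush threshold
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // blocking multiplier

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    // Writes are rejected with RegionTooBusyException once the region's memstore
    // exceeds flushSize * multiplier (128 K * 4 = 512 K with the sample values).
    System.out.println("blocking limit = " + (flushSize * multiplier) + " bytes");
  }
}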
2024-11-22T15:24:25,908 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:25,908 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851., storeName=ed44e89acb87ffee72f4c7902667e851/A, priority=12, startTime=1732289065458; duration=0sec 2024-11-22T15:24:25,908 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:25,908 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed44e89acb87ffee72f4c7902667e851:A 2024-11-22T15:24:25,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-22T15:24:25,999 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=394 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/9fd60e78a3244509989d3853d6ec97d7 2024-11-22T15:24:26,009 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:26,010 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-22T15:24:26,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:26,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:26,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:26,010 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:26,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:26,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:26,026 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/28c3e036d8784e4ab7a39d5c9b7fd73c is 50, key is test_row_0/B:col10/1732289065423/Put/seqid=0 2024-11-22T15:24:26,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742466_1642 (size=12301) 2024-11-22T15:24:26,055 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=394 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/28c3e036d8784e4ab7a39d5c9b7fd73c 2024-11-22T15:24:26,075 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/ea139240038b45eb832fb0e83914143d is 50, key is test_row_0/C:col10/1732289065423/Put/seqid=0 2024-11-22T15:24:26,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742467_1643 (size=12301) 2024-11-22T15:24:26,092 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=394 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/ea139240038b45eb832fb0e83914143d 2024-11-22T15:24:26,096 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/9fd60e78a3244509989d3853d6ec97d7 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/9fd60e78a3244509989d3853d6ec97d7 2024-11-22T15:24:26,103 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/9fd60e78a3244509989d3853d6ec97d7, entries=200, sequenceid=394, filesize=14.4 K 2024-11-22T15:24:26,105 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/28c3e036d8784e4ab7a39d5c9b7fd73c as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/28c3e036d8784e4ab7a39d5c9b7fd73c 2024-11-22T15:24:26,110 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/28c3e036d8784e4ab7a39d5c9b7fd73c, entries=150, sequenceid=394, filesize=12.0 K 2024-11-22T15:24:26,110 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/ea139240038b45eb832fb0e83914143d as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/ea139240038b45eb832fb0e83914143d 2024-11-22T15:24:26,115 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/ea139240038b45eb832fb0e83914143d, entries=150, sequenceid=394, filesize=12.0 K 2024-11-22T15:24:26,116 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for ed44e89acb87ffee72f4c7902667e851 in 581ms, sequenceid=394, compaction requested=false 2024-11-22T15:24:26,116 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:26,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-22T15:24:26,162 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:26,162 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-22T15:24:26,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 
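On the client side, each of the rejected Mutate calls above surfaces from Table.put as a RegionTooBusyException; the HBase client normally retries these itself with backoff until the call's deadline, which is why the same connections keep reappearing with new callIds for the same rows. A minimal sketch, assuming the standard HBase Java client and reusing the table, row, and column names visible in this log, of a writer that backs off explicitly instead of relying solely on the built-in retries:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffWriter {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);               // rejected while the memstore is over its blocking limit
          break;
        } catch (RegionTooBusyException e) {
          if (attempt >= 10) throw e;   // give up after a bounded number of attempts
          Thread.sleep(backoffMs);      // let flushes and compactions catch up
          backoffMs = Math.min(backoffMs * 2, 5_000);
        }
      }
    }
  }
}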
2024-11-22T15:24:26,162 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2837): Flushing ed44e89acb87ffee72f4c7902667e851 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-22T15:24:26,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=A 2024-11-22T15:24:26,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:26,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=B 2024-11-22T15:24:26,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:26,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=C 2024-11-22T15:24:26,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:26,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed44e89acb87ffee72f4c7902667e851 2024-11-22T15:24:26,177 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 
as already flushing 2024-11-22T15:24:26,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/13f5ebc9be534cdab9951bb02b289b6d is 50, key is test_row_0/A:col10/1732289065554/Put/seqid=0 2024-11-22T15:24:26,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742468_1644 (size=12301) 2024-11-22T15:24:26,200 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=408 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/13f5ebc9be534cdab9951bb02b289b6d 2024-11-22T15:24:26,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/389d3bf5d1f8450f984335917a4b8ce4 is 50, key is test_row_0/B:col10/1732289065554/Put/seqid=0 2024-11-22T15:24:26,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742469_1645 (size=12301) 2024-11-22T15:24:26,233 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:26,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289126222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:26,243 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:26,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289126233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:26,243 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:26,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289126233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:26,243 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:26,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289126233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:26,338 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:26,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289126334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:26,347 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:26,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289126344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:26,347 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:26,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289126344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:26,348 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:26,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289126344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:26,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-22T15:24:26,546 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:26,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289126540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:26,556 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:26,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289126549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:26,557 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:26,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289126549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:26,557 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:26,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289126549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:26,626 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=408 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/389d3bf5d1f8450f984335917a4b8ce4 2024-11-22T15:24:26,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/334fedd0e96b49b296d164e2ef056504 is 50, key is test_row_0/C:col10/1732289065554/Put/seqid=0 2024-11-22T15:24:26,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742470_1646 (size=12301) 2024-11-22T15:24:26,674 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=408 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/334fedd0e96b49b296d164e2ef056504 2024-11-22T15:24:26,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/13f5ebc9be534cdab9951bb02b289b6d as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/13f5ebc9be534cdab9951bb02b289b6d 2024-11-22T15:24:26,685 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/13f5ebc9be534cdab9951bb02b289b6d, entries=150, sequenceid=408, filesize=12.0 K 2024-11-22T15:24:26,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/389d3bf5d1f8450f984335917a4b8ce4 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/389d3bf5d1f8450f984335917a4b8ce4 2024-11-22T15:24:26,693 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/389d3bf5d1f8450f984335917a4b8ce4, entries=150, sequenceid=408, filesize=12.0 K 2024-11-22T15:24:26,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/334fedd0e96b49b296d164e2ef056504 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/334fedd0e96b49b296d164e2ef056504 2024-11-22T15:24:26,706 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/334fedd0e96b49b296d164e2ef056504, entries=150, sequenceid=408, filesize=12.0 K 2024-11-22T15:24:26,708 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for ed44e89acb87ffee72f4c7902667e851 in 546ms, sequenceid=408, compaction requested=true 2024-11-22T15:24:26,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2538): Flush status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:26,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 
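The flush that has just completed on the region server (pid=143, running on behalf of FlushTableProcedure pid=142, which is reported finished just below) is the server-side half of an administrative flush of TestAcidGuarantees, matching the "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" request logged earlier. A minimal sketch, assuming the standard HBase Admin API, of issuing that request from a client:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; as the log above shows,
      // the master stores a FlushTableProcedure and dispatches FlushRegionProcedure
      // subtasks to the region servers, retrying them if a region is already flushing.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}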
2024-11-22T15:24:26,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=143 2024-11-22T15:24:26,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=143 2024-11-22T15:24:26,711 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=142 2024-11-22T15:24:26,711 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 851 msec 2024-11-22T15:24:26,713 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees in 856 msec 2024-11-22T15:24:26,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed44e89acb87ffee72f4c7902667e851 2024-11-22T15:24:26,856 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed44e89acb87ffee72f4c7902667e851 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-22T15:24:26,856 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=A 2024-11-22T15:24:26,857 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:26,857 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=B 2024-11-22T15:24:26,857 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:26,857 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=C 2024-11-22T15:24:26,857 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:26,863 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/dcb6c72f031d460fa1c42b126968f66e is 50, key is test_row_0/A:col10/1732289066230/Put/seqid=0 2024-11-22T15:24:26,882 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:26,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289126871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:26,882 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:26,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289126871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:26,882 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:26,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289126876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:26,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742471_1647 (size=14741) 2024-11-22T15:24:26,893 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:26,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289126883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:26,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-22T15:24:26,962 INFO [Thread-2464 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 142 completed 2024-11-22T15:24:26,963 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:24:26,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees 2024-11-22T15:24:26,964 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:24:26,965 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:24:26,965 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:24:26,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-22T15:24:26,987 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:26,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289126983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:26,987 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:26,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289126983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:26,987 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:26,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289126984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:26,998 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:26,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289126994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:27,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-22T15:24:27,117 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:27,117 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-22T15:24:27,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:27,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:27,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:27,117 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:24:27,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:27,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:27,192 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:27,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289127188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:27,192 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:27,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289127189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:27,192 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:27,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289127190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:27,202 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:27,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289127200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:27,272 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:27,272 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-22T15:24:27,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-22T15:24:27,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:27,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:27,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:27,272 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:27,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:27,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:27,291 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=434 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/dcb6c72f031d460fa1c42b126968f66e 2024-11-22T15:24:27,311 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/27003832ca1e470ba6679d61c400a582 is 50, key is test_row_0/B:col10/1732289066230/Put/seqid=0 2024-11-22T15:24:27,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742472_1648 (size=12301) 2024-11-22T15:24:27,352 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=434 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/27003832ca1e470ba6679d61c400a582 2024-11-22T15:24:27,372 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/6e6b9d8737f542d9b36b63bdc1ac65a9 is 50, key is test_row_0/C:col10/1732289066230/Put/seqid=0 2024-11-22T15:24:27,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742473_1649 (size=12301) 2024-11-22T15:24:27,413 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=434 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/6e6b9d8737f542d9b36b63bdc1ac65a9 2024-11-22T15:24:27,423 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/dcb6c72f031d460fa1c42b126968f66e as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/dcb6c72f031d460fa1c42b126968f66e 2024-11-22T15:24:27,425 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:27,425 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] 
regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-22T15:24:27,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:27,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:27,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:27,425 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:27,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:24:27,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:24:27,434 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/dcb6c72f031d460fa1c42b126968f66e, entries=200, sequenceid=434, filesize=14.4 K 2024-11-22T15:24:27,435 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/27003832ca1e470ba6679d61c400a582 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/27003832ca1e470ba6679d61c400a582 2024-11-22T15:24:27,438 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/27003832ca1e470ba6679d61c400a582, entries=150, sequenceid=434, filesize=12.0 K 2024-11-22T15:24:27,440 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/6e6b9d8737f542d9b36b63bdc1ac65a9 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/6e6b9d8737f542d9b36b63bdc1ac65a9 2024-11-22T15:24:27,444 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/6e6b9d8737f542d9b36b63bdc1ac65a9, entries=150, sequenceid=434, filesize=12.0 K 2024-11-22T15:24:27,444 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for ed44e89acb87ffee72f4c7902667e851 in 588ms, sequenceid=434, compaction requested=true 2024-11-22T15:24:27,444 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:27,444 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed44e89acb87ffee72f4c7902667e851:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:24:27,444 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:27,445 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed44e89acb87ffee72f4c7902667e851:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:24:27,445 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T15:24:27,445 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:24:27,445 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed44e89acb87ffee72f4c7902667e851:C, 
priority=-2147483648, current under compaction store size is 3 2024-11-22T15:24:27,445 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-22T15:24:27,445 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T15:24:27,446 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 54970 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T15:24:27,446 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): ed44e89acb87ffee72f4c7902667e851/A is initiating minor compaction (all files) 2024-11-22T15:24:27,446 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed44e89acb87ffee72f4c7902667e851/A in TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:27,446 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/822ee1c78e2244228d88de8de0f4481c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/9fd60e78a3244509989d3853d6ec97d7, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/13f5ebc9be534cdab9951bb02b289b6d, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/dcb6c72f031d460fa1c42b126968f66e] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp, totalSize=53.7 K 2024-11-22T15:24:27,446 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50090 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T15:24:27,446 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): ed44e89acb87ffee72f4c7902667e851/B is initiating minor compaction (all files) 2024-11-22T15:24:27,446 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed44e89acb87ffee72f4c7902667e851/B in TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 
2024-11-22T15:24:27,446 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/bbadeaf50ab44844a4a0a5899f9b843b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/28c3e036d8784e4ab7a39d5c9b7fd73c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/389d3bf5d1f8450f984335917a4b8ce4, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/27003832ca1e470ba6679d61c400a582] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp, totalSize=48.9 K
2024-11-22T15:24:27,447 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 822ee1c78e2244228d88de8de0f4481c, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=369, earliestPutTs=1732289064707
2024-11-22T15:24:27,447 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting bbadeaf50ab44844a4a0a5899f9b843b, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=369, earliestPutTs=1732289064707
2024-11-22T15:24:27,447 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 9fd60e78a3244509989d3853d6ec97d7, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1732289065398
2024-11-22T15:24:27,447 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 28c3e036d8784e4ab7a39d5c9b7fd73c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1732289065407
2024-11-22T15:24:27,447 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 13f5ebc9be534cdab9951bb02b289b6d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=408, earliestPutTs=1732289065551
2024-11-22T15:24:27,448 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 389d3bf5d1f8450f984335917a4b8ce4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=408, earliestPutTs=1732289065551
2024-11-22T15:24:27,448 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting dcb6c72f031d460fa1c42b126968f66e, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=434, earliestPutTs=1732289066219
2024-11-22T15:24:27,448 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 27003832ca1e470ba6679d61c400a582, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=434, earliestPutTs=1732289066219
2024-11-22T15:24:27,465 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed44e89acb87ffee72f4c7902667e851#A#compaction#558 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-22T15:24:27,465 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/1289b7ff4b5342d6bea091e8db7eae0c is 50, key is test_row_0/A:col10/1732289066230/Put/seqid=0
2024-11-22T15:24:27,475 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed44e89acb87ffee72f4c7902667e851#B#compaction#559 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-22T15:24:27,476 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/90948a61eb05443fb63f0e284b4261d9 is 50, key is test_row_0/B:col10/1732289066230/Put/seqid=0
2024-11-22T15:24:27,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed44e89acb87ffee72f4c7902667e851
2024-11-22T15:24:27,503 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed44e89acb87ffee72f4c7902667e851 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB
2024-11-22T15:24:27,517 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=A
2024-11-22T15:24:27,517 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-22T15:24:27,517 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=B
2024-11-22T15:24:27,518 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-22T15:24:27,518 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=C
2024-11-22T15:24:27,518 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-22T15:24:27,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742474_1650 (size=13323)
2024-11-22T15:24:27,534 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/1289b7ff4b5342d6bea091e8db7eae0c as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/1289b7ff4b5342d6bea091e8db7eae0c
2024-11-22T15:24:27,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742475_1651 (size=13323)
2024-11-22T15:24:27,546 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ed44e89acb87ffee72f4c7902667e851/A of ed44e89acb87ffee72f4c7902667e851 into 1289b7ff4b5342d6bea091e8db7eae0c(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-22T15:24:27,546 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed44e89acb87ffee72f4c7902667e851:
2024-11-22T15:24:27,546 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851., storeName=ed44e89acb87ffee72f4c7902667e851/A, priority=12, startTime=1732289067444; duration=0sec
2024-11-22T15:24:27,546 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-22T15:24:27,546 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed44e89acb87ffee72f4c7902667e851:A
2024-11-22T15:24:27,546 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking
2024-11-22T15:24:27,548 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50056 starting at candidate #0 after considering 3 permutations with 3 in ratio
2024-11-22T15:24:27,548 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): ed44e89acb87ffee72f4c7902667e851/C is initiating minor compaction (all files)
2024-11-22T15:24:27,548 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed44e89acb87ffee72f4c7902667e851/C in TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.
2024-11-22T15:24:27,548 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/7bd6f68435fd44b6b12753d15884c77f, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/ea139240038b45eb832fb0e83914143d, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/334fedd0e96b49b296d164e2ef056504, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/6e6b9d8737f542d9b36b63bdc1ac65a9] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp, totalSize=48.9 K
2024-11-22T15:24:27,549 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 7bd6f68435fd44b6b12753d15884c77f, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=369, earliestPutTs=1732289064707
2024-11-22T15:24:27,550 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting ea139240038b45eb832fb0e83914143d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1732289065407
2024-11-22T15:24:27,550 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 334fedd0e96b49b296d164e2ef056504, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=408, earliestPutTs=1732289065551
2024-11-22T15:24:27,551 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 6e6b9d8737f542d9b36b63bdc1ac65a9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=434, earliestPutTs=1732289066219
2024-11-22T15:24:27,551 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/97ad8fc80ae145b3b08b24a65b7a8f95 is 50, key is test_row_0/A:col10/1732289066865/Put/seqid=0
2024-11-22T15:24:27,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144
2024-11-22T15:24:27,577 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed44e89acb87ffee72f4c7902667e851#C#compaction#561 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-22T15:24:27,577 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809
2024-11-22T15:24:27,578 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/845fb15e6518464c8b73c577a77a799b is 50, key is test_row_0/C:col10/1732289066230/Put/seqid=0
2024-11-22T15:24:27,578 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145
2024-11-22T15:24:27,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.
2024-11-22T15:24:27,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing
2024-11-22T15:24:27,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.
2024-11-22T15:24:27,579 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145
java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T15:24:27,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145
java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T15:24:27,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=145
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T15:24:27,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742476_1652 (size=14741)
2024-11-22T15:24:27,588 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=447 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/97ad8fc80ae145b3b08b24a65b7a8f95
2024-11-22T15:24:27,603 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-22T15:24:27,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289127592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809
2024-11-22T15:24:27,606 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-22T15:24:27,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289127595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809
2024-11-22T15:24:27,606 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-22T15:24:27,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289127596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809
2024-11-22T15:24:27,607 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-22T15:24:27,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289127599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809
2024-11-22T15:24:27,616 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/6d20a38735a94e42be95384a6cd24508 is 50, key is test_row_0/B:col10/1732289066865/Put/seqid=0
2024-11-22T15:24:27,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742477_1653 (size=13289)
2024-11-22T15:24:27,623 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/845fb15e6518464c8b73c577a77a799b as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/845fb15e6518464c8b73c577a77a799b
2024-11-22T15:24:27,630 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ed44e89acb87ffee72f4c7902667e851/C of ed44e89acb87ffee72f4c7902667e851 into 845fb15e6518464c8b73c577a77a799b(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-22T15:24:27,630 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed44e89acb87ffee72f4c7902667e851:
2024-11-22T15:24:27,630 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851., storeName=ed44e89acb87ffee72f4c7902667e851/C, priority=12, startTime=1732289067445; duration=0sec
2024-11-22T15:24:27,630 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-22T15:24:27,630 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed44e89acb87ffee72f4c7902667e851:C
2024-11-22T15:24:27,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742478_1654 (size=12301)
2024-11-22T15:24:27,633 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=447 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/6d20a38735a94e42be95384a6cd24508
2024-11-22T15:24:27,644 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/36eb7ddd29184f468cbf020bf16a76f0 is 50, key is test_row_0/C:col10/1732289066865/Put/seqid=0
2024-11-22T15:24:27,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742479_1655 (size=12301)
2024-11-22T15:24:27,664 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=447 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/36eb7ddd29184f468cbf020bf16a76f0
2024-11-22T15:24:27,668 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/97ad8fc80ae145b3b08b24a65b7a8f95 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/97ad8fc80ae145b3b08b24a65b7a8f95
2024-11-22T15:24:27,674 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/97ad8fc80ae145b3b08b24a65b7a8f95, entries=200, sequenceid=447, filesize=14.4 K
2024-11-22T15:24:27,674 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/6d20a38735a94e42be95384a6cd24508 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/6d20a38735a94e42be95384a6cd24508
2024-11-22T15:24:27,678 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/6d20a38735a94e42be95384a6cd24508, entries=150, sequenceid=447, filesize=12.0 K
2024-11-22T15:24:27,679 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/36eb7ddd29184f468cbf020bf16a76f0 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/36eb7ddd29184f468cbf020bf16a76f0
2024-11-22T15:24:27,683 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/36eb7ddd29184f468cbf020bf16a76f0, entries=150, sequenceid=447, filesize=12.0 K
2024-11-22T15:24:27,683 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for ed44e89acb87ffee72f4c7902667e851 in 180ms, sequenceid=447, compaction requested=false
2024-11-22T15:24:27,683 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed44e89acb87ffee72f4c7902667e851:
2024-11-22T15:24:27,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed44e89acb87ffee72f4c7902667e851
2024-11-22T15:24:27,714 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed44e89acb87ffee72f4c7902667e851 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB
2024-11-22T15:24:27,715 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=A
2024-11-22T15:24:27,715 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-22T15:24:27,715 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=B
2024-11-22T15:24:27,715 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-22T15:24:27,715 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=C
2024-11-22T15:24:27,715 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-22T15:24:27,719 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/4060fdeec83a46e08ec1a8d06108ec2a is 50, key is test_row_0/A:col10/1732289067594/Put/seqid=0
2024-11-22T15:24:27,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742480_1656 (size=19621)
2024-11-22T15:24:27,731 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809
2024-11-22T15:24:27,732 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145
2024-11-22T15:24:27,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.
2024-11-22T15:24:27,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing
2024-11-22T15:24:27,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.
2024-11-22T15:24:27,732 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145
java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T15:24:27,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145
java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T15:24:27,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=145
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T15:24:27,744 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-22T15:24:27,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289127731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809
2024-11-22T15:24:27,744 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-22T15:24:27,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289127731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809
2024-11-22T15:24:27,746 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-22T15:24:27,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289127743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809
2024-11-22T15:24:27,753 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-22T15:24:27,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289127744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809
2024-11-22T15:24:27,854 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-22T15:24:27,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289127845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809
2024-11-22T15:24:27,855 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-22T15:24:27,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289127845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809
2024-11-22T15:24:27,855 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-22T15:24:27,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289127847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809
2024-11-22T15:24:27,873 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-22T15:24:27,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289127869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809
2024-11-22T15:24:27,884 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809
2024-11-22T15:24:27,884 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145
2024-11-22T15:24:27,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.
2024-11-22T15:24:27,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing
2024-11-22T15:24:27,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.
2024-11-22T15:24:27,885 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145
java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T15:24:27,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145
java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T15:24:27,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=145
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T15:24:27,950 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/90948a61eb05443fb63f0e284b4261d9 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/90948a61eb05443fb63f0e284b4261d9
2024-11-22T15:24:27,957 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ed44e89acb87ffee72f4c7902667e851/B of ed44e89acb87ffee72f4c7902667e851 into 90948a61eb05443fb63f0e284b4261d9(size=13.0 K), total size for store is 25.0 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-22T15:24:27,957 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed44e89acb87ffee72f4c7902667e851:
2024-11-22T15:24:27,957 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851., storeName=ed44e89acb87ffee72f4c7902667e851/B, priority=12, startTime=1732289067444; duration=0sec
2024-11-22T15:24:27,957 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-22T15:24:27,957 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed44e89acb87ffee72f4c7902667e851:B
2024-11-22T15:24:28,036 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809
2024-11-22T15:24:28,037 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145
2024-11-22T15:24:28,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.
2024-11-22T15:24:28,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing
2024-11-22T15:24:28,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.
2024-11-22T15:24:28,037 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145
java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T15:24:28,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145
java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T15:24:28,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=145
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T15:24:28,063 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:28,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289128058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:28,069 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:28,069 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:28,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289128058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:28,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289128058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:28,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-22T15:24:28,083 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:28,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289128075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:28,125 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=475 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/4060fdeec83a46e08ec1a8d06108ec2a 2024-11-22T15:24:28,138 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/f2161e6c85944c86866cbbe0f598f8e9 is 50, key is test_row_0/B:col10/1732289067594/Put/seqid=0 2024-11-22T15:24:28,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742481_1657 (size=12301) 2024-11-22T15:24:28,188 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:28,188 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-22T15:24:28,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:28,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:28,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:28,189 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:28,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:28,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:28,340 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:28,341 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-22T15:24:28,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:28,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:28,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:28,341 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:28,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:28,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:28,371 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:28,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289128364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:28,373 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:28,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289128371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:28,374 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:28,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289128371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:28,391 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:28,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289128384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:28,493 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:28,494 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-22T15:24:28,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:28,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:28,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:28,494 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:24:28,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:28,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:28,565 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=475 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/f2161e6c85944c86866cbbe0f598f8e9 2024-11-22T15:24:28,570 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/c6f4a50a410e46cca7c2508a42493650 is 50, key is test_row_0/C:col10/1732289067594/Put/seqid=0 2024-11-22T15:24:28,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742482_1658 (size=12301) 2024-11-22T15:24:28,588 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=475 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/c6f4a50a410e46cca7c2508a42493650 2024-11-22T15:24:28,593 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/4060fdeec83a46e08ec1a8d06108ec2a as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/4060fdeec83a46e08ec1a8d06108ec2a 2024-11-22T15:24:28,595 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/4060fdeec83a46e08ec1a8d06108ec2a, entries=300, sequenceid=475, filesize=19.2 K 2024-11-22T15:24:28,596 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/f2161e6c85944c86866cbbe0f598f8e9 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/f2161e6c85944c86866cbbe0f598f8e9 2024-11-22T15:24:28,603 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/f2161e6c85944c86866cbbe0f598f8e9, entries=150, sequenceid=475, filesize=12.0 K 2024-11-22T15:24:28,604 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/c6f4a50a410e46cca7c2508a42493650 as 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/c6f4a50a410e46cca7c2508a42493650 2024-11-22T15:24:28,608 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/c6f4a50a410e46cca7c2508a42493650, entries=150, sequenceid=475, filesize=12.0 K 2024-11-22T15:24:28,608 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for ed44e89acb87ffee72f4c7902667e851 in 894ms, sequenceid=475, compaction requested=true 2024-11-22T15:24:28,609 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:28,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed44e89acb87ffee72f4c7902667e851:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:24:28,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:28,609 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:24:28,609 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:24:28,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed44e89acb87ffee72f4c7902667e851:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:24:28,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:28,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed44e89acb87ffee72f4c7902667e851:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:24:28,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:24:28,614 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37925 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:24:28,614 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): ed44e89acb87ffee72f4c7902667e851/B is initiating minor compaction (all files) 2024-11-22T15:24:28,614 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed44e89acb87ffee72f4c7902667e851/B in TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 
2024-11-22T15:24:28,614 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/90948a61eb05443fb63f0e284b4261d9, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/6d20a38735a94e42be95384a6cd24508, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/f2161e6c85944c86866cbbe0f598f8e9] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp, totalSize=37.0 K 2024-11-22T15:24:28,615 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 47685 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:24:28,615 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): ed44e89acb87ffee72f4c7902667e851/A is initiating minor compaction (all files) 2024-11-22T15:24:28,615 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed44e89acb87ffee72f4c7902667e851/A in TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:28,615 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/1289b7ff4b5342d6bea091e8db7eae0c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/97ad8fc80ae145b3b08b24a65b7a8f95, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/4060fdeec83a46e08ec1a8d06108ec2a] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp, totalSize=46.6 K 2024-11-22T15:24:28,615 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 90948a61eb05443fb63f0e284b4261d9, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=434, earliestPutTs=1732289066219 2024-11-22T15:24:28,615 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1289b7ff4b5342d6bea091e8db7eae0c, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=434, earliestPutTs=1732289066219 2024-11-22T15:24:28,616 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 6d20a38735a94e42be95384a6cd24508, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=447, earliestPutTs=1732289066865 2024-11-22T15:24:28,623 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 97ad8fc80ae145b3b08b24a65b7a8f95, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=447, earliestPutTs=1732289066865 2024-11-22T15:24:28,623 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] 
compactions.Compactor(224): Compacting f2161e6c85944c86866cbbe0f598f8e9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=475, earliestPutTs=1732289067594 2024-11-22T15:24:28,623 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4060fdeec83a46e08ec1a8d06108ec2a, keycount=300, bloomtype=ROW, size=19.2 K, encoding=NONE, compression=NONE, seqNum=475, earliestPutTs=1732289067593 2024-11-22T15:24:28,632 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed44e89acb87ffee72f4c7902667e851#B#compaction#567 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:28,633 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/6e4f69d5c3d64ac1ab263b8dbf22146b is 50, key is test_row_0/B:col10/1732289067594/Put/seqid=0 2024-11-22T15:24:28,640 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed44e89acb87ffee72f4c7902667e851#A#compaction#568 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:28,641 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/11f871ebce3847948f80970f5fde2185 is 50, key is test_row_0/A:col10/1732289067594/Put/seqid=0 2024-11-22T15:24:28,646 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:28,647 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-22T15:24:28,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 
2024-11-22T15:24:28,647 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2837): Flushing ed44e89acb87ffee72f4c7902667e851 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-22T15:24:28,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=A 2024-11-22T15:24:28,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:28,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=B 2024-11-22T15:24:28,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:28,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=C 2024-11-22T15:24:28,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:28,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742483_1659 (size=13425) 2024-11-22T15:24:28,673 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/6e4f69d5c3d64ac1ab263b8dbf22146b as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/6e4f69d5c3d64ac1ab263b8dbf22146b 2024-11-22T15:24:28,679 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed44e89acb87ffee72f4c7902667e851/B of ed44e89acb87ffee72f4c7902667e851 into 6e4f69d5c3d64ac1ab263b8dbf22146b(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:24:28,679 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:28,679 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851., storeName=ed44e89acb87ffee72f4c7902667e851/B, priority=13, startTime=1732289068609; duration=0sec 2024-11-22T15:24:28,679 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:24:28,679 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed44e89acb87ffee72f4c7902667e851:B 2024-11-22T15:24:28,679 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:24:28,680 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:24:28,680 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): ed44e89acb87ffee72f4c7902667e851/C is initiating minor compaction (all files) 2024-11-22T15:24:28,680 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed44e89acb87ffee72f4c7902667e851/C in TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:28,680 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/845fb15e6518464c8b73c577a77a799b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/36eb7ddd29184f468cbf020bf16a76f0, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/c6f4a50a410e46cca7c2508a42493650] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp, totalSize=37.0 K 2024-11-22T15:24:28,680 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 845fb15e6518464c8b73c577a77a799b, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=434, earliestPutTs=1732289066219 2024-11-22T15:24:28,680 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 36eb7ddd29184f468cbf020bf16a76f0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=447, earliestPutTs=1732289066865 2024-11-22T15:24:28,681 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting c6f4a50a410e46cca7c2508a42493650, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=475, earliestPutTs=1732289067594 2024-11-22T15:24:28,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/6e04054c6e5c477f814c6d14f2a5b5f2 is 50, key is test_row_0/A:col10/1732289067743/Put/seqid=0 2024-11-22T15:24:28,712 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed44e89acb87ffee72f4c7902667e851#C#compaction#570 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:28,712 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/fa4696a78d6f4492a4ce472a7941ffb3 is 50, key is test_row_0/C:col10/1732289067594/Put/seqid=0 2024-11-22T15:24:28,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742484_1660 (size=13425) 2024-11-22T15:24:28,737 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/11f871ebce3847948f80970f5fde2185 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/11f871ebce3847948f80970f5fde2185 2024-11-22T15:24:28,742 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed44e89acb87ffee72f4c7902667e851/A of ed44e89acb87ffee72f4c7902667e851 into 11f871ebce3847948f80970f5fde2185(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:24:28,743 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:28,743 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851., storeName=ed44e89acb87ffee72f4c7902667e851/A, priority=13, startTime=1732289068609; duration=0sec 2024-11-22T15:24:28,743 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:28,743 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed44e89acb87ffee72f4c7902667e851:A 2024-11-22T15:24:28,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742485_1661 (size=12301) 2024-11-22T15:24:28,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742486_1662 (size=13391) 2024-11-22T15:24:28,768 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/fa4696a78d6f4492a4ce472a7941ffb3 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/fa4696a78d6f4492a4ce472a7941ffb3 2024-11-22T15:24:28,774 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed44e89acb87ffee72f4c7902667e851/C of ed44e89acb87ffee72f4c7902667e851 into fa4696a78d6f4492a4ce472a7941ffb3(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:24:28,774 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:28,774 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851., storeName=ed44e89acb87ffee72f4c7902667e851/C, priority=13, startTime=1732289068609; duration=0sec 2024-11-22T15:24:28,774 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:28,774 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed44e89acb87ffee72f4c7902667e851:C 2024-11-22T15:24:28,877 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:28,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed44e89acb87ffee72f4c7902667e851 2024-11-22T15:24:28,970 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:28,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289128961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:28,971 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:28,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289128962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:28,972 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:28,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289128962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:28,972 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:28,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289128962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:29,074 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:29,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289129072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:29,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-22T15:24:29,075 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:29,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289129072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:29,075 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:29,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289129073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:29,075 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:29,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289129073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:29,147 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=486 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/6e04054c6e5c477f814c6d14f2a5b5f2 2024-11-22T15:24:29,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/753003fd3f9f4c6e8e02501355c89ee3 is 50, key is test_row_0/B:col10/1732289067743/Put/seqid=0 2024-11-22T15:24:29,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742487_1663 (size=12301) 2024-11-22T15:24:29,184 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=486 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/753003fd3f9f4c6e8e02501355c89ee3 2024-11-22T15:24:29,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/2ec88f6dfcb24880889ba4df27b226dd is 50, key is test_row_0/C:col10/1732289067743/Put/seqid=0 2024-11-22T15:24:29,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742488_1664 (size=12301) 2024-11-22T15:24:29,233 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=486 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/2ec88f6dfcb24880889ba4df27b226dd 2024-11-22T15:24:29,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/6e04054c6e5c477f814c6d14f2a5b5f2 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/6e04054c6e5c477f814c6d14f2a5b5f2 2024-11-22T15:24:29,243 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/6e04054c6e5c477f814c6d14f2a5b5f2, entries=150, sequenceid=486, filesize=12.0 K 2024-11-22T15:24:29,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/753003fd3f9f4c6e8e02501355c89ee3 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/753003fd3f9f4c6e8e02501355c89ee3 2024-11-22T15:24:29,249 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/753003fd3f9f4c6e8e02501355c89ee3, entries=150, sequenceid=486, filesize=12.0 K 2024-11-22T15:24:29,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/2ec88f6dfcb24880889ba4df27b226dd as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/2ec88f6dfcb24880889ba4df27b226dd 2024-11-22T15:24:29,256 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/2ec88f6dfcb24880889ba4df27b226dd, entries=150, sequenceid=486, filesize=12.0 K 2024-11-22T15:24:29,257 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for ed44e89acb87ffee72f4c7902667e851 in 610ms, sequenceid=486, compaction requested=false 2024-11-22T15:24:29,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2538): Flush status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:29,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 
2024-11-22T15:24:29,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=145 2024-11-22T15:24:29,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=145 2024-11-22T15:24:29,259 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=145, resume processing ppid=144 2024-11-22T15:24:29,259 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2930 sec 2024-11-22T15:24:29,260 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees in 2.2970 sec 2024-11-22T15:24:29,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed44e89acb87ffee72f4c7902667e851 2024-11-22T15:24:29,282 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed44e89acb87ffee72f4c7902667e851 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-22T15:24:29,284 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=A 2024-11-22T15:24:29,284 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:29,284 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=B 2024-11-22T15:24:29,284 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:29,284 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=C 2024-11-22T15:24:29,284 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:29,289 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/ced0162790ee41cdb4155dfcfa541046 is 50, key is test_row_0/A:col10/1732289068922/Put/seqid=0 2024-11-22T15:24:29,295 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:29,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289129287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:29,302 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:29,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289129294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:29,306 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:29,306 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:29,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289129300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:29,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289129295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:29,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742489_1665 (size=12301) 2024-11-22T15:24:29,313 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=517 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/ced0162790ee41cdb4155dfcfa541046 2024-11-22T15:24:29,320 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/9b67f798c8914d40ac187e57d123d413 is 50, key is test_row_0/B:col10/1732289068922/Put/seqid=0 2024-11-22T15:24:29,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742490_1666 (size=12301) 2024-11-22T15:24:29,337 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=517 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/9b67f798c8914d40ac187e57d123d413 2024-11-22T15:24:29,343 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/fc310245a9664dd49c2824f507fa917c is 50, key is test_row_0/C:col10/1732289068922/Put/seqid=0 2024-11-22T15:24:29,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742491_1667 (size=12301) 2024-11-22T15:24:29,382 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=517 (bloomFilter=true), 
to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/fc310245a9664dd49c2824f507fa917c 2024-11-22T15:24:29,392 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/ced0162790ee41cdb4155dfcfa541046 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/ced0162790ee41cdb4155dfcfa541046 2024-11-22T15:24:29,399 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/ced0162790ee41cdb4155dfcfa541046, entries=150, sequenceid=517, filesize=12.0 K 2024-11-22T15:24:29,400 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:29,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289129396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:29,405 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:29,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289129403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:29,407 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/9b67f798c8914d40ac187e57d123d413 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/9b67f798c8914d40ac187e57d123d413 2024-11-22T15:24:29,412 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/9b67f798c8914d40ac187e57d123d413, entries=150, sequenceid=517, filesize=12.0 K 2024-11-22T15:24:29,412 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:29,413 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/fc310245a9664dd49c2824f507fa917c as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/fc310245a9664dd49c2824f507fa917c 2024-11-22T15:24:29,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289129408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:29,413 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:29,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289129408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:29,422 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/fc310245a9664dd49c2824f507fa917c, entries=150, sequenceid=517, filesize=12.0 K 2024-11-22T15:24:29,423 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=40.25 KB/41220 for ed44e89acb87ffee72f4c7902667e851 in 141ms, sequenceid=517, compaction requested=true 2024-11-22T15:24:29,423 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:29,423 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed44e89acb87ffee72f4c7902667e851:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:24:29,423 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:24:29,424 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38027 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:24:29,424 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): ed44e89acb87ffee72f4c7902667e851/A is initiating minor compaction (all files) 2024-11-22T15:24:29,424 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed44e89acb87ffee72f4c7902667e851/A in TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 
2024-11-22T15:24:29,425 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/11f871ebce3847948f80970f5fde2185, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/6e04054c6e5c477f814c6d14f2a5b5f2, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/ced0162790ee41cdb4155dfcfa541046] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp, totalSize=37.1 K 2024-11-22T15:24:29,425 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 11f871ebce3847948f80970f5fde2185, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=475, earliestPutTs=1732289067594 2024-11-22T15:24:29,425 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:29,425 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed44e89acb87ffee72f4c7902667e851:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:24:29,425 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:24:29,425 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed44e89acb87ffee72f4c7902667e851:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:24:29,425 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-22T15:24:29,426 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:24:29,426 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6e04054c6e5c477f814c6d14f2a5b5f2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=486, earliestPutTs=1732289067727 2024-11-22T15:24:29,426 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting ced0162790ee41cdb4155dfcfa541046, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=517, earliestPutTs=1732289068922 2024-11-22T15:24:29,427 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38027 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:24:29,427 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): ed44e89acb87ffee72f4c7902667e851/B is initiating minor compaction (all files) 2024-11-22T15:24:29,427 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed44e89acb87ffee72f4c7902667e851/B in 
TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:29,427 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/6e4f69d5c3d64ac1ab263b8dbf22146b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/753003fd3f9f4c6e8e02501355c89ee3, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/9b67f798c8914d40ac187e57d123d413] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp, totalSize=37.1 K 2024-11-22T15:24:29,429 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 6e4f69d5c3d64ac1ab263b8dbf22146b, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=475, earliestPutTs=1732289067594 2024-11-22T15:24:29,429 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 753003fd3f9f4c6e8e02501355c89ee3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=486, earliestPutTs=1732289067727 2024-11-22T15:24:29,429 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 9b67f798c8914d40ac187e57d123d413, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=517, earliestPutTs=1732289068922 2024-11-22T15:24:29,440 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed44e89acb87ffee72f4c7902667e851#A#compaction#576 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:29,441 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/08784acf83ab4403bf6b0656603a1159 is 50, key is test_row_0/A:col10/1732289068922/Put/seqid=0 2024-11-22T15:24:29,449 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed44e89acb87ffee72f4c7902667e851#B#compaction#577 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:29,449 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/4bc436a0b2d6477bbf7cd87e44c137f0 is 50, key is test_row_0/B:col10/1732289068922/Put/seqid=0 2024-11-22T15:24:29,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742492_1668 (size=13527) 2024-11-22T15:24:29,488 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/08784acf83ab4403bf6b0656603a1159 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/08784acf83ab4403bf6b0656603a1159 2024-11-22T15:24:29,500 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed44e89acb87ffee72f4c7902667e851/A of ed44e89acb87ffee72f4c7902667e851 into 08784acf83ab4403bf6b0656603a1159(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:24:29,500 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:29,500 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851., storeName=ed44e89acb87ffee72f4c7902667e851/A, priority=13, startTime=1732289069423; duration=0sec 2024-11-22T15:24:29,500 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:24:29,500 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed44e89acb87ffee72f4c7902667e851:A 2024-11-22T15:24:29,500 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:24:29,502 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37993 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:24:29,502 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): ed44e89acb87ffee72f4c7902667e851/C is initiating minor compaction (all files) 2024-11-22T15:24:29,502 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed44e89acb87ffee72f4c7902667e851/C in TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 
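The run above shows region ed44e89acb87ffee72f4c7902667e851 alternating between memstore flushes, minor compactions of stores A/B/C, and bursts of RegionTooBusyException: Over memstore limit=512.0 K, during which puts are rejected until MemStoreFlusher.0 drains the memstore (the blocking limit is normally hbase.hregion.memstore.flush.size scaled by hbase.hregion.memstore.block.multiplier; this test configures it far below the production default). As a rough illustration of the client side of that back-pressure, the following is a minimal, hypothetical Java sketch of a put against the TestAcidGuarantees table with manual backoff on RegionTooBusyException. The table name, row key, family A and qualifier col10 are taken from the log; the class name, cell value, attempt count and backoff numbers are invented for illustration, and a real client would usually rely on its built-in retries (hbase.client.retries.number, hbase.client.pause) instead.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // Illustrative sketch only: names and retry numbers are invented, not part of the test.
    public class BackoffPutExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {

                // Same shape of write the log shows being throttled: test_row_0, family A, col10.
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

                long backoffMs = 100;                 // arbitrary initial pause between attempts
                for (int attempt = 1; attempt <= 5; attempt++) {
                    try {
                        table.put(put);               // the HBase client may also retry internally
                        break;                        // write accepted
                    } catch (RegionTooBusyException e) {
                        // The server rejected the write because the region is above its memstore
                        // blocking limit (the "Over memstore limit=512.0 K" entries in the log).
                        // Depending on client retry settings this may instead surface wrapped in a
                        // RetriesExhaustedException; this sketch only handles the direct case.
                        Thread.sleep(backoffMs);
                        backoffMs *= 2;               // simple exponential backoff
                    }
                }
            }
        }
    }

Exponential backoff here simply gives the flush and the queued compactions visible in the surrounding entries time to complete before the next attempt.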
2024-11-22T15:24:29,502 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/fa4696a78d6f4492a4ce472a7941ffb3, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/2ec88f6dfcb24880889ba4df27b226dd, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/fc310245a9664dd49c2824f507fa917c] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp, totalSize=37.1 K 2024-11-22T15:24:29,502 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting fa4696a78d6f4492a4ce472a7941ffb3, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=475, earliestPutTs=1732289067594 2024-11-22T15:24:29,503 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2ec88f6dfcb24880889ba4df27b226dd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=486, earliestPutTs=1732289067727 2024-11-22T15:24:29,503 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting fc310245a9664dd49c2824f507fa917c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=517, earliestPutTs=1732289068922 2024-11-22T15:24:29,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742493_1669 (size=13527) 2024-11-22T15:24:29,522 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed44e89acb87ffee72f4c7902667e851#C#compaction#578 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:29,523 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/23b4ef004d30451085c2429eb22019c8 is 50, key is test_row_0/C:col10/1732289068922/Put/seqid=0 2024-11-22T15:24:29,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742494_1670 (size=13493) 2024-11-22T15:24:29,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed44e89acb87ffee72f4c7902667e851 2024-11-22T15:24:29,607 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed44e89acb87ffee72f4c7902667e851 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-22T15:24:29,608 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=A 2024-11-22T15:24:29,608 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:29,608 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=B 2024-11-22T15:24:29,608 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:29,608 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=C 2024-11-22T15:24:29,608 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:29,611 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/5392aa836ac745fb8f9ac374b44c5a12 is 50, key is test_row_0/A:col10/1732289069606/Put/seqid=0 2024-11-22T15:24:29,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742495_1671 (size=14737) 2024-11-22T15:24:29,640 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=529 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/5392aa836ac745fb8f9ac374b44c5a12 2024-11-22T15:24:29,659 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:29,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289129652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:29,660 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:29,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289129656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:29,664 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/05ebe61fa4644215adb5fe0bd252a74c is 50, key is test_row_0/B:col10/1732289069606/Put/seqid=0 2024-11-22T15:24:29,665 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:29,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289129658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:29,667 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:29,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289129660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:29,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742496_1672 (size=9857) 2024-11-22T15:24:29,765 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:29,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289129760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:29,765 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:29,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289129761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:29,773 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:29,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289129767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:29,773 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:29,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289129768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:29,909 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/4bc436a0b2d6477bbf7cd87e44c137f0 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/4bc436a0b2d6477bbf7cd87e44c137f0 2024-11-22T15:24:29,913 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed44e89acb87ffee72f4c7902667e851/B of ed44e89acb87ffee72f4c7902667e851 into 4bc436a0b2d6477bbf7cd87e44c137f0(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:24:29,913 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:29,913 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851., storeName=ed44e89acb87ffee72f4c7902667e851/B, priority=13, startTime=1732289069425; duration=0sec 2024-11-22T15:24:29,913 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:29,913 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed44e89acb87ffee72f4c7902667e851:B 2024-11-22T15:24:29,938 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/23b4ef004d30451085c2429eb22019c8 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/23b4ef004d30451085c2429eb22019c8 2024-11-22T15:24:29,942 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed44e89acb87ffee72f4c7902667e851/C of ed44e89acb87ffee72f4c7902667e851 into 23b4ef004d30451085c2429eb22019c8(size=13.2 K), total size for store is 13.2 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:24:29,942 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:29,942 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851., storeName=ed44e89acb87ffee72f4c7902667e851/C, priority=13, startTime=1732289069425; duration=0sec 2024-11-22T15:24:29,942 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:29,942 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed44e89acb87ffee72f4c7902667e851:C 2024-11-22T15:24:29,970 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:29,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289129967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:29,970 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:29,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289129968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:29,977 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:29,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289129975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:29,977 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:29,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289129975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:30,089 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=529 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/05ebe61fa4644215adb5fe0bd252a74c 2024-11-22T15:24:30,095 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/fdf8b08e1dba41e19ccc1a283a36f99a is 50, key is test_row_0/C:col10/1732289069606/Put/seqid=0 2024-11-22T15:24:30,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742497_1673 (size=9857) 2024-11-22T15:24:30,274 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:30,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289130271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:30,274 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:30,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289130271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:30,288 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:30,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289130280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:30,288 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:30,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289130280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:30,511 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=529 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/fdf8b08e1dba41e19ccc1a283a36f99a 2024-11-22T15:24:30,515 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/5392aa836ac745fb8f9ac374b44c5a12 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/5392aa836ac745fb8f9ac374b44c5a12 2024-11-22T15:24:30,518 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/5392aa836ac745fb8f9ac374b44c5a12, entries=200, sequenceid=529, filesize=14.4 K 2024-11-22T15:24:30,518 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/05ebe61fa4644215adb5fe0bd252a74c as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/05ebe61fa4644215adb5fe0bd252a74c 2024-11-22T15:24:30,521 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/05ebe61fa4644215adb5fe0bd252a74c, entries=100, sequenceid=529, filesize=9.6 K 2024-11-22T15:24:30,522 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/fdf8b08e1dba41e19ccc1a283a36f99a as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/fdf8b08e1dba41e19ccc1a283a36f99a 2024-11-22T15:24:30,525 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/fdf8b08e1dba41e19ccc1a283a36f99a, entries=100, sequenceid=529, filesize=9.6 K 2024-11-22T15:24:30,526 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for ed44e89acb87ffee72f4c7902667e851 in 919ms, sequenceid=529, compaction requested=false 2024-11-22T15:24:30,526 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:30,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed44e89acb87ffee72f4c7902667e851 2024-11-22T15:24:30,785 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed44e89acb87ffee72f4c7902667e851 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-22T15:24:30,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=A 2024-11-22T15:24:30,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:30,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=B 2024-11-22T15:24:30,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:30,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=C 2024-11-22T15:24:30,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:30,808 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/b59136dd0f9d4b4f9355d03a07b34020 is 50, key is test_row_0/A:col10/1732289069649/Put/seqid=0 2024-11-22T15:24:30,810 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:30,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289130799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:30,813 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:30,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289130809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:30,821 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:30,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 257 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289130811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:30,821 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:30,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289130812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:30,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742498_1674 (size=14741) 2024-11-22T15:24:30,838 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=557 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/b59136dd0f9d4b4f9355d03a07b34020 2024-11-22T15:24:30,858 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/0560519cede946b184cd71c79674ba21 is 50, key is test_row_0/B:col10/1732289069649/Put/seqid=0 2024-11-22T15:24:30,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742499_1675 (size=12301) 2024-11-22T15:24:30,914 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:30,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289130911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:30,920 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:30,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289130915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:30,926 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:30,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289130922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:30,927 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:30,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 259 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289130923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:31,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-22T15:24:31,075 INFO [Thread-2464 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 144 completed 2024-11-22T15:24:31,076 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:24:31,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=146, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees 2024-11-22T15:24:31,077 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=146, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:24:31,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-22T15:24:31,078 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=146, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:24:31,078 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=147, ppid=146, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:24:31,120 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:31,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289131116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:31,124 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:31,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289131121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:31,131 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:31,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289131127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:31,131 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:31,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289131127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:31,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-22T15:24:31,229 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:31,229 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-22T15:24:31,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:31,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:31,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:31,229 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:24:31,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:31,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:31,262 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=557 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/0560519cede946b184cd71c79674ba21 2024-11-22T15:24:31,266 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/51f16cdce9cd4c3f8994003a5d62ee30 is 50, key is test_row_0/C:col10/1732289069649/Put/seqid=0 2024-11-22T15:24:31,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742500_1676 (size=12301) 2024-11-22T15:24:31,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-22T15:24:31,381 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:31,381 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-22T15:24:31,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:31,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:31,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:31,382 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:31,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:31,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:31,424 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:31,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289131423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:31,430 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:31,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289131425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:31,436 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:31,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 263 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289131432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:31,437 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:31,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289131433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:31,533 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:31,533 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-22T15:24:31,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:31,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:31,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:31,534 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:31,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:31,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:31,671 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=557 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/51f16cdce9cd4c3f8994003a5d62ee30 2024-11-22T15:24:31,674 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/b59136dd0f9d4b4f9355d03a07b34020 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/b59136dd0f9d4b4f9355d03a07b34020 2024-11-22T15:24:31,676 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/b59136dd0f9d4b4f9355d03a07b34020, entries=200, sequenceid=557, filesize=14.4 K 2024-11-22T15:24:31,676 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/0560519cede946b184cd71c79674ba21 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/0560519cede946b184cd71c79674ba21 2024-11-22T15:24:31,679 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/0560519cede946b184cd71c79674ba21, entries=150, sequenceid=557, filesize=12.0 K 2024-11-22T15:24:31,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-22T15:24:31,679 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/51f16cdce9cd4c3f8994003a5d62ee30 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/51f16cdce9cd4c3f8994003a5d62ee30 2024-11-22T15:24:31,682 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/51f16cdce9cd4c3f8994003a5d62ee30, entries=150, sequenceid=557, filesize=12.0 K 2024-11-22T15:24:31,682 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for ed44e89acb87ffee72f4c7902667e851 in 898ms, sequenceid=557, compaction requested=true 2024-11-22T15:24:31,682 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:31,682 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed44e89acb87ffee72f4c7902667e851:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:24:31,682 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:31,682 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:24:31,682 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed44e89acb87ffee72f4c7902667e851:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:24:31,682 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:31,682 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:24:31,682 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed44e89acb87ffee72f4c7902667e851:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:24:31,682 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:24:31,683 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 43005 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:24:31,683 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35685 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:24:31,683 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): ed44e89acb87ffee72f4c7902667e851/A is initiating minor compaction (all files) 2024-11-22T15:24:31,683 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): ed44e89acb87ffee72f4c7902667e851/B is initiating minor compaction (all files) 2024-11-22T15:24:31,683 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed44e89acb87ffee72f4c7902667e851/B in TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 
2024-11-22T15:24:31,683 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed44e89acb87ffee72f4c7902667e851/A in TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:31,683 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/4bc436a0b2d6477bbf7cd87e44c137f0, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/05ebe61fa4644215adb5fe0bd252a74c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/0560519cede946b184cd71c79674ba21] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp, totalSize=34.8 K 2024-11-22T15:24:31,683 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/08784acf83ab4403bf6b0656603a1159, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/5392aa836ac745fb8f9ac374b44c5a12, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/b59136dd0f9d4b4f9355d03a07b34020] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp, totalSize=42.0 K 2024-11-22T15:24:31,683 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 4bc436a0b2d6477bbf7cd87e44c137f0, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=517, earliestPutTs=1732289068922 2024-11-22T15:24:31,683 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 08784acf83ab4403bf6b0656603a1159, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=517, earliestPutTs=1732289068922 2024-11-22T15:24:31,683 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 05ebe61fa4644215adb5fe0bd252a74c, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=529, earliestPutTs=1732289069605 2024-11-22T15:24:31,683 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5392aa836ac745fb8f9ac374b44c5a12, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=529, earliestPutTs=1732289069293 2024-11-22T15:24:31,684 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 0560519cede946b184cd71c79674ba21, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=557, earliestPutTs=1732289069649 2024-11-22T15:24:31,684 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting b59136dd0f9d4b4f9355d03a07b34020, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=557, earliestPutTs=1732289069649 
2024-11-22T15:24:31,685 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:31,686 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-22T15:24:31,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:31,686 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2837): Flushing ed44e89acb87ffee72f4c7902667e851 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-22T15:24:31,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=A 2024-11-22T15:24:31,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:31,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=B 2024-11-22T15:24:31,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:31,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=C 2024-11-22T15:24:31,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:31,689 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed44e89acb87ffee72f4c7902667e851#B#compaction#585 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:31,689 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed44e89acb87ffee72f4c7902667e851#A#compaction#586 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:31,689 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/755bdb780f55400fa13e24435b823414 is 50, key is test_row_0/B:col10/1732289069649/Put/seqid=0 2024-11-22T15:24:31,689 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/cb2bc6dca5134b3c916683d4ed11a9b2 is 50, key is test_row_0/A:col10/1732289069649/Put/seqid=0 2024-11-22T15:24:31,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/a56c49df57f3478b852ea00feaa3a10f is 50, key is test_row_0/A:col10/1732289070808/Put/seqid=0 2024-11-22T15:24:31,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742502_1678 (size=13629) 2024-11-22T15:24:31,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742501_1677 (size=13629) 2024-11-22T15:24:31,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742503_1679 (size=12301) 2024-11-22T15:24:31,931 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:31,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed44e89acb87ffee72f4c7902667e851 2024-11-22T15:24:31,964 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:31,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289131960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:31,970 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:31,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 247 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289131961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:31,971 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:31,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 259 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289131962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:31,971 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:31,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 269 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289131962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:32,069 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:32,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 263 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289132065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:32,075 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:32,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 249 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289132071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:32,075 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:32,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289132072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:32,076 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:32,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 271 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289132072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:32,099 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=568 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/a56c49df57f3478b852ea00feaa3a10f 2024-11-22T15:24:32,099 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/755bdb780f55400fa13e24435b823414 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/755bdb780f55400fa13e24435b823414 2024-11-22T15:24:32,100 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/cb2bc6dca5134b3c916683d4ed11a9b2 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/cb2bc6dca5134b3c916683d4ed11a9b2 2024-11-22T15:24:32,103 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed44e89acb87ffee72f4c7902667e851/B of ed44e89acb87ffee72f4c7902667e851 into 755bdb780f55400fa13e24435b823414(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:24:32,103 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:32,103 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851., storeName=ed44e89acb87ffee72f4c7902667e851/B, priority=13, startTime=1732289071682; duration=0sec 2024-11-22T15:24:32,103 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:24:32,103 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed44e89acb87ffee72f4c7902667e851:B 2024-11-22T15:24:32,104 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:24:32,104 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35651 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:24:32,104 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): ed44e89acb87ffee72f4c7902667e851/C is initiating minor compaction (all files) 2024-11-22T15:24:32,104 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed44e89acb87ffee72f4c7902667e851/C in TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:32,104 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/23b4ef004d30451085c2429eb22019c8, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/fdf8b08e1dba41e19ccc1a283a36f99a, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/51f16cdce9cd4c3f8994003a5d62ee30] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp, totalSize=34.8 K 2024-11-22T15:24:32,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/053caa87454c494cb8aa4e0adc221229 is 50, key is test_row_0/B:col10/1732289070808/Put/seqid=0 2024-11-22T15:24:32,105 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed44e89acb87ffee72f4c7902667e851/A of ed44e89acb87ffee72f4c7902667e851 into cb2bc6dca5134b3c916683d4ed11a9b2(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:24:32,105 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:32,105 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851., storeName=ed44e89acb87ffee72f4c7902667e851/A, priority=13, startTime=1732289071682; duration=0sec 2024-11-22T15:24:32,105 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:32,105 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 23b4ef004d30451085c2429eb22019c8, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=517, earliestPutTs=1732289068922 2024-11-22T15:24:32,105 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed44e89acb87ffee72f4c7902667e851:A 2024-11-22T15:24:32,105 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting fdf8b08e1dba41e19ccc1a283a36f99a, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=529, earliestPutTs=1732289069605 2024-11-22T15:24:32,105 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 51f16cdce9cd4c3f8994003a5d62ee30, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=557, earliestPutTs=1732289069649 2024-11-22T15:24:32,116 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed44e89acb87ffee72f4c7902667e851#C#compaction#589 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:32,117 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/583314516354450cb75ac7df7fe863e4 is 50, key is test_row_0/C:col10/1732289069649/Put/seqid=0 2024-11-22T15:24:32,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742504_1680 (size=12301) 2024-11-22T15:24:32,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742505_1681 (size=13595) 2024-11-22T15:24:32,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-22T15:24:32,273 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:32,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 265 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289132270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:32,280 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:32,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 263 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289132276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:32,280 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:32,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 273 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289132276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:32,281 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:32,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 251 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289132277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:32,517 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:32,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38240 deadline: 1732289132515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:32,517 DEBUG [Thread-2454 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18258 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851., hostname=77927f992d0b,36033,1732288915809, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T15:24:32,519 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=568 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/053caa87454c494cb8aa4e0adc221229 2024-11-22T15:24:32,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/1bfcba53edbb4513877a71da009f7777 is 50, key is test_row_0/C:col10/1732289070808/Put/seqid=0 2024-11-22T15:24:32,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742506_1682 (size=12301) 2024-11-22T15:24:32,527 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/583314516354450cb75ac7df7fe863e4 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/583314516354450cb75ac7df7fe863e4 2024-11-22T15:24:32,530 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed44e89acb87ffee72f4c7902667e851/C of ed44e89acb87ffee72f4c7902667e851 into 583314516354450cb75ac7df7fe863e4(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:24:32,530 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:32,530 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851., storeName=ed44e89acb87ffee72f4c7902667e851/C, priority=13, startTime=1732289071682; duration=0sec 2024-11-22T15:24:32,530 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:32,530 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed44e89acb87ffee72f4c7902667e851:C 2024-11-22T15:24:32,577 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:32,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 267 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289132574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:32,584 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:32,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 265 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289132581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:32,584 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:32,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 275 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289132581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:32,585 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:32,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 253 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289132582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:32,927 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=568 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/1bfcba53edbb4513877a71da009f7777 2024-11-22T15:24:32,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/a56c49df57f3478b852ea00feaa3a10f as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/a56c49df57f3478b852ea00feaa3a10f 2024-11-22T15:24:32,932 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/a56c49df57f3478b852ea00feaa3a10f, entries=150, sequenceid=568, filesize=12.0 K 2024-11-22T15:24:32,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/053caa87454c494cb8aa4e0adc221229 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/053caa87454c494cb8aa4e0adc221229 2024-11-22T15:24:32,935 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/053caa87454c494cb8aa4e0adc221229, entries=150, sequenceid=568, filesize=12.0 K 2024-11-22T15:24:32,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/1bfcba53edbb4513877a71da009f7777 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/1bfcba53edbb4513877a71da009f7777 2024-11-22T15:24:32,938 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/1bfcba53edbb4513877a71da009f7777, entries=150, sequenceid=568, filesize=12.0 K 2024-11-22T15:24:32,939 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for ed44e89acb87ffee72f4c7902667e851 in 1252ms, sequenceid=568, compaction requested=false 2024-11-22T15:24:32,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2538): Flush status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:32,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:32,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=147 2024-11-22T15:24:32,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=147 2024-11-22T15:24:32,940 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=147, resume processing ppid=146 2024-11-22T15:24:32,940 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, ppid=146, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8610 sec 2024-11-22T15:24:32,941 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees in 1.8650 sec 2024-11-22T15:24:33,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed44e89acb87ffee72f4c7902667e851 2024-11-22T15:24:33,081 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ed44e89acb87ffee72f4c7902667e851 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-22T15:24:33,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=A 2024-11-22T15:24:33,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:33,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=B 2024-11-22T15:24:33,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:33,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
ed44e89acb87ffee72f4c7902667e851, store=C 2024-11-22T15:24:33,082 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:33,085 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/315a02762d5c47af9dd9bc290af19ad2 is 50, key is test_row_0/A:col10/1732289071960/Put/seqid=0 2024-11-22T15:24:33,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742507_1683 (size=14741) 2024-11-22T15:24:33,102 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:33,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 256 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289133094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:33,103 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:33,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 278 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289133095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:33,104 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:33,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 273 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289133101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:33,109 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:33,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 269 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289133102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:33,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-22T15:24:33,181 INFO [Thread-2464 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 146 completed 2024-11-22T15:24:33,181 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:24:33,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=148, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees 2024-11-22T15:24:33,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-22T15:24:33,183 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=148, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:24:33,183 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=148, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:24:33,183 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:24:33,207 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:33,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 258 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289133203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:33,207 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:33,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 280 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289133203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:33,208 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:33,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 275 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289133204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:33,213 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:33,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 271 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289133210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:33,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-22T15:24:33,334 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:33,334 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-22T15:24:33,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:33,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:33,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:33,335 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
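The FlushTableProcedure entries above (pid=146 finishing, then pid=148 being stored after the "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" request) correspond to an Admin.flush call on the client side. A minimal sketch of that call, assuming a standard client Connection and default configuration; only the table name is taken from the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; the master runs a
      // FlushTableProcedure with one FlushRegionProcedure subprocedure per region,
      // as seen with pid=146/147 and pid=148/149 in this log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}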
2024-11-22T15:24:33,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:33,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:33,412 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:33,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 260 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289133408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:33,413 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:33,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 277 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289133408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:33,413 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:33,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 282 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289133409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:33,418 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:33,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 273 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289133414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:33,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-22T15:24:33,486 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:33,487 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-22T15:24:33,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:33,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:33,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:33,487 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:33,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:33,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:33,488 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=597 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/315a02762d5c47af9dd9bc290af19ad2 2024-11-22T15:24:33,493 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/126aab51361842c595111f8a92f7e60f is 50, key is test_row_0/B:col10/1732289071960/Put/seqid=0 2024-11-22T15:24:33,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742508_1684 (size=12301) 2024-11-22T15:24:33,638 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:33,639 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-22T15:24:33,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:33,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:33,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:33,639 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:33,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:33,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:33,717 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:33,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 284 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289133714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:33,717 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:33,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 262 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289133714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:33,720 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:33,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 279 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289133715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:33,723 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:33,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 275 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289133720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:33,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-22T15:24:33,790 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:33,791 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-22T15:24:33,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:33,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:33,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:33,791 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
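The repeated WARN/DEBUG pairs in this stretch are Mutate RPCs being rejected by HRegion.checkResources while the region's memstore sits above its blocking limit; the server answers each put with RegionTooBusyException until the in-flight flush frees space. The stock HBase client retries such calls internally, so application code rarely sees the rejection directly; the sketch below makes the backoff explicit, lowers the client retry count for illustration, and takes only the row and column names from the "test_row_0/A:col10" keys in the log (the value is purely illustrative):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithBackoff {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative: reduce internal client retries so the rejection surfaces quickly.
    conf.setInt("hbase.client.retries.number", 1);
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          break; // accepted once the region is no longer blocked
        } catch (IOException e) {
          // Typically wraps the RegionTooBusyException ("Over memstore limit ...")
          // logged above; back off and let the flush drain the memstore.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}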
2024-11-22T15:24:33,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:33,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:33,896 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=597 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/126aab51361842c595111f8a92f7e60f 2024-11-22T15:24:33,902 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/a31b8cbae7874038810962f77939f498 is 50, key is test_row_0/C:col10/1732289071960/Put/seqid=0 2024-11-22T15:24:33,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742509_1685 (size=12301) 2024-11-22T15:24:33,942 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:33,943 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-22T15:24:33,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:33,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:33,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:33,943 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:24:33,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:33,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:34,094 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:34,095 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-22T15:24:34,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:34,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:34,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:34,095 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:34,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:34,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:34,223 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:34,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 286 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38182 deadline: 1732289134219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:34,223 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:34,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 264 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38200 deadline: 1732289134219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:34,229 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:34,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 281 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38202 deadline: 1732289134225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:34,230 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:34,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 277 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38250 deadline: 1732289134227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:34,246 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:34,247 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-22T15:24:34,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:34,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:34,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:34,247 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
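The repeated RegionTooBusyException warnings above are HRegion.checkResources rejecting writes while the region's memstore is over its 512.0 K blocking limit. The stock HBase client already retries this exception internally, so the loop below is only an explicit illustration of the same idea; the retry cap and backoff values are arbitrary assumptions, and only the table name, row key, and column family are taken from this log:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutExample {
  public static void main(String[] args) throws IOException, InterruptedException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row key and column family come from the log above; the value is made up.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
      int attempts = 0;
      while (true) {
        try {
          table.put(put);                 // rejected while the memstore is over its blocking limit
          break;
        } catch (RegionTooBusyException e) {
          if (++attempts > 5) {
            throw e;                      // arbitrary cap: give up after five retries
          }
          Thread.sleep(100L * attempts);  // simple linear backoff before retrying
        }
      }
    }
  }
}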
2024-11-22T15:24:34,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:34,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:34,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-22T15:24:34,299 DEBUG [Thread-2473 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x51196534 to 127.0.0.1:52970 2024-11-22T15:24:34,299 DEBUG [Thread-2473 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:24:34,300 DEBUG [Thread-2469 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6050584c to 127.0.0.1:52970 2024-11-22T15:24:34,300 DEBUG [Thread-2469 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:24:34,303 DEBUG [Thread-2467 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2070263a to 127.0.0.1:52970 2024-11-22T15:24:34,303 DEBUG [Thread-2467 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:24:34,304 DEBUG [Thread-2465 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5bc486e1 to 127.0.0.1:52970 2024-11-22T15:24:34,304 DEBUG [Thread-2465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:24:34,304 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=597 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/a31b8cbae7874038810962f77939f498 2024-11-22T15:24:34,306 DEBUG [Thread-2471 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6dd48863 to 127.0.0.1:52970 2024-11-22T15:24:34,306 DEBUG [Thread-2471 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:24:34,307 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/315a02762d5c47af9dd9bc290af19ad2 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/315a02762d5c47af9dd9bc290af19ad2 2024-11-22T15:24:34,309 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/315a02762d5c47af9dd9bc290af19ad2, entries=200, sequenceid=597, filesize=14.4 K 2024-11-22T15:24:34,309 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/126aab51361842c595111f8a92f7e60f as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/126aab51361842c595111f8a92f7e60f 2024-11-22T15:24:34,311 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/126aab51361842c595111f8a92f7e60f, entries=150, sequenceid=597, filesize=12.0 K 2024-11-22T15:24:34,312 DEBUG 
[MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/a31b8cbae7874038810962f77939f498 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/a31b8cbae7874038810962f77939f498 2024-11-22T15:24:34,314 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/a31b8cbae7874038810962f77939f498, entries=150, sequenceid=597, filesize=12.0 K 2024-11-22T15:24:34,314 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for ed44e89acb87ffee72f4c7902667e851 in 1233ms, sequenceid=597, compaction requested=true 2024-11-22T15:24:34,314 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:34,314 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed44e89acb87ffee72f4c7902667e851:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:24:34,314 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:34,314 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:24:34,314 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed44e89acb87ffee72f4c7902667e851:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:24:34,314 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:34,314 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:24:34,314 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ed44e89acb87ffee72f4c7902667e851:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:24:34,314 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:24:34,315 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38231 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:24:34,315 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40671 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:24:34,315 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): ed44e89acb87ffee72f4c7902667e851/B is initiating minor 
compaction (all files) 2024-11-22T15:24:34,315 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): ed44e89acb87ffee72f4c7902667e851/A is initiating minor compaction (all files) 2024-11-22T15:24:34,315 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed44e89acb87ffee72f4c7902667e851/B in TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:34,315 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed44e89acb87ffee72f4c7902667e851/A in TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:34,315 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/755bdb780f55400fa13e24435b823414, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/053caa87454c494cb8aa4e0adc221229, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/126aab51361842c595111f8a92f7e60f] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp, totalSize=37.3 K 2024-11-22T15:24:34,315 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/cb2bc6dca5134b3c916683d4ed11a9b2, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/a56c49df57f3478b852ea00feaa3a10f, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/315a02762d5c47af9dd9bc290af19ad2] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp, totalSize=39.7 K 2024-11-22T15:24:34,315 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting cb2bc6dca5134b3c916683d4ed11a9b2, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=557, earliestPutTs=1732289069649 2024-11-22T15:24:34,315 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 755bdb780f55400fa13e24435b823414, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=557, earliestPutTs=1732289069649 2024-11-22T15:24:34,315 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting a56c49df57f3478b852ea00feaa3a10f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=568, earliestPutTs=1732289070808 2024-11-22T15:24:34,315 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 053caa87454c494cb8aa4e0adc221229, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=568, earliestPutTs=1732289070808 2024-11-22T15:24:34,316 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 315a02762d5c47af9dd9bc290af19ad2, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=597, earliestPutTs=1732289071960 2024-11-22T15:24:34,316 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 126aab51361842c595111f8a92f7e60f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=597, earliestPutTs=1732289071960 2024-11-22T15:24:34,320 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed44e89acb87ffee72f4c7902667e851#A#compaction#594 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:34,321 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/60c3df1b627d4bcd86e0117951ade686 is 50, key is test_row_0/A:col10/1732289071960/Put/seqid=0 2024-11-22T15:24:34,322 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed44e89acb87ffee72f4c7902667e851#B#compaction#595 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:34,323 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/1bd5a1d5a0444f16ba4a1c94ec27a9ce is 50, key is test_row_0/B:col10/1732289071960/Put/seqid=0 2024-11-22T15:24:34,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742510_1686 (size=13731) 2024-11-22T15:24:34,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742511_1687 (size=13731) 2024-11-22T15:24:34,398 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:34,399 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-22T15:24:34,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 
2024-11-22T15:24:34,400 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2837): Flushing ed44e89acb87ffee72f4c7902667e851 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-22T15:24:34,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=A 2024-11-22T15:24:34,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:34,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=B 2024-11-22T15:24:34,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:34,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=C 2024-11-22T15:24:34,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:34,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/c11c6ee50dce430eaf36153deae912a6 is 50, key is test_row_0/A:col10/1732289073100/Put/seqid=0 2024-11-22T15:24:34,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742512_1688 (size=12301) 2024-11-22T15:24:34,734 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/1bd5a1d5a0444f16ba4a1c94ec27a9ce as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/1bd5a1d5a0444f16ba4a1c94ec27a9ce 2024-11-22T15:24:34,734 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/60c3df1b627d4bcd86e0117951ade686 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/60c3df1b627d4bcd86e0117951ade686 2024-11-22T15:24:34,740 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed44e89acb87ffee72f4c7902667e851/A of ed44e89acb87ffee72f4c7902667e851 into 60c3df1b627d4bcd86e0117951ade686(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:24:34,740 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed44e89acb87ffee72f4c7902667e851/B of ed44e89acb87ffee72f4c7902667e851 into 1bd5a1d5a0444f16ba4a1c94ec27a9ce(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:24:34,740 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:34,740 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:34,740 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851., storeName=ed44e89acb87ffee72f4c7902667e851/A, priority=13, startTime=1732289074314; duration=0sec 2024-11-22T15:24:34,740 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851., storeName=ed44e89acb87ffee72f4c7902667e851/B, priority=13, startTime=1732289074314; duration=0sec 2024-11-22T15:24:34,740 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:24:34,740 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed44e89acb87ffee72f4c7902667e851:A 2024-11-22T15:24:34,740 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:24:34,740 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed44e89acb87ffee72f4c7902667e851:B 2024-11-22T15:24:34,740 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:24:34,741 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38197 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:24:34,741 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): ed44e89acb87ffee72f4c7902667e851/C is initiating minor compaction (all files) 2024-11-22T15:24:34,741 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ed44e89acb87ffee72f4c7902667e851/C in TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 
2024-11-22T15:24:34,741 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/583314516354450cb75ac7df7fe863e4, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/1bfcba53edbb4513877a71da009f7777, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/a31b8cbae7874038810962f77939f498] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp, totalSize=37.3 K 2024-11-22T15:24:34,742 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 583314516354450cb75ac7df7fe863e4, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=557, earliestPutTs=1732289069649 2024-11-22T15:24:34,742 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1bfcba53edbb4513877a71da009f7777, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=568, earliestPutTs=1732289070808 2024-11-22T15:24:34,742 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting a31b8cbae7874038810962f77939f498, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=597, earliestPutTs=1732289071960 2024-11-22T15:24:34,752 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ed44e89acb87ffee72f4c7902667e851#C#compaction#597 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:34,752 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/89347dab75e1403082bfd99b873091ac is 50, key is test_row_0/C:col10/1732289071960/Put/seqid=0 2024-11-22T15:24:34,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742513_1689 (size=13697) 2024-11-22T15:24:34,814 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=607 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/c11c6ee50dce430eaf36153deae912a6 2024-11-22T15:24:34,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/fde39dc2c3a24430bee2e3269371c4e7 is 50, key is test_row_0/B:col10/1732289073100/Put/seqid=0 2024-11-22T15:24:34,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742514_1690 (size=12301) 2024-11-22T15:24:35,165 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/89347dab75e1403082bfd99b873091ac as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/89347dab75e1403082bfd99b873091ac 2024-11-22T15:24:35,170 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ed44e89acb87ffee72f4c7902667e851/C of ed44e89acb87ffee72f4c7902667e851 into 89347dab75e1403082bfd99b873091ac(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
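Both behaviours seen in this stretch of the log, writes blocking once the memstore passes 512.0 K and minor compactions firing as soon as a store reaches three files, are governed by standard HBase configuration keys. A sketch with illustrative values only (this excerpt does not show the settings the test actually used):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreAndCompactionTuning {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Per-region memstore flush threshold, in bytes (128 KB here is an assumed value).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
    // Writes are rejected with RegionTooBusyException once the memstore exceeds
    // flush.size * block.multiplier (4 is the usual default).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    // Number of store files at which a minor compaction becomes eligible.
    conf.setInt("hbase.hstore.compactionThreshold", 3);
    System.out.println("memstore flush size = " + conf.get("hbase.hregion.memstore.flush.size"));
  }
}

Since the blocking limit is the flush size multiplied by the block multiplier, a 128 KB flush size with the default multiplier of 4 would give the 512 K figure reported above; that is an inference from the log, not a setting confirmed by it.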
2024-11-22T15:24:35,170 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:35,170 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851., storeName=ed44e89acb87ffee72f4c7902667e851/C, priority=13, startTime=1732289074314; duration=0sec 2024-11-22T15:24:35,170 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:35,170 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ed44e89acb87ffee72f4c7902667e851:C 2024-11-22T15:24:35,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on ed44e89acb87ffee72f4c7902667e851 2024-11-22T15:24:35,225 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. as already flushing 2024-11-22T15:24:35,225 DEBUG [Thread-2458 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7d0ab200 to 127.0.0.1:52970 2024-11-22T15:24:35,225 DEBUG [Thread-2458 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:24:35,229 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=607 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/fde39dc2c3a24430bee2e3269371c4e7 2024-11-22T15:24:35,234 DEBUG [Thread-2462 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7daa5922 to 127.0.0.1:52970 2024-11-22T15:24:35,234 DEBUG [Thread-2462 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:24:35,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/db2b120588fb4ef0a8a4cdd66674d533 is 50, key is test_row_0/C:col10/1732289073100/Put/seqid=0 2024-11-22T15:24:35,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742515_1691 (size=12301) 2024-11-22T15:24:35,238 DEBUG [Thread-2456 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x65df2359 to 127.0.0.1:52970 2024-11-22T15:24:35,238 DEBUG [Thread-2460 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5871c039 to 127.0.0.1:52970 2024-11-22T15:24:35,238 DEBUG [Thread-2456 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:24:35,238 DEBUG [Thread-2460 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:24:35,239 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=607 (bloomFilter=true), 
to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/db2b120588fb4ef0a8a4cdd66674d533 2024-11-22T15:24:35,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/c11c6ee50dce430eaf36153deae912a6 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/c11c6ee50dce430eaf36153deae912a6 2024-11-22T15:24:35,245 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/c11c6ee50dce430eaf36153deae912a6, entries=150, sequenceid=607, filesize=12.0 K 2024-11-22T15:24:35,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/fde39dc2c3a24430bee2e3269371c4e7 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/fde39dc2c3a24430bee2e3269371c4e7 2024-11-22T15:24:35,249 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/fde39dc2c3a24430bee2e3269371c4e7, entries=150, sequenceid=607, filesize=12.0 K 2024-11-22T15:24:35,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/db2b120588fb4ef0a8a4cdd66674d533 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/db2b120588fb4ef0a8a4cdd66674d533 2024-11-22T15:24:35,251 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/db2b120588fb4ef0a8a4cdd66674d533, entries=150, sequenceid=607, filesize=12.0 K 2024-11-22T15:24:35,252 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=26.84 KB/27480 for ed44e89acb87ffee72f4c7902667e851 in 853ms, sequenceid=607, compaction requested=false 2024-11-22T15:24:35,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2538): Flush status journal for 
ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:35,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:35,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=149 2024-11-22T15:24:35,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=149 2024-11-22T15:24:35,253 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=149, resume processing ppid=148 2024-11-22T15:24:35,253 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, ppid=148, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0700 sec 2024-11-22T15:24:35,254 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees in 2.0730 sec 2024-11-22T15:24:35,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-22T15:24:35,286 INFO [Thread-2464 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 148 completed 2024-11-22T15:24:42,610 DEBUG [Thread-2454 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x63cefe40 to 127.0.0.1:52970 2024-11-22T15:24:42,610 DEBUG [Thread-2454 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:24:42,610 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-22T15:24:42,610 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 1 2024-11-22T15:24:42,610 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 122 2024-11-22T15:24:42,610 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 127 2024-11-22T15:24:42,610 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 124 2024-11-22T15:24:42,610 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 109 2024-11-22T15:24:42,610 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-22T15:24:42,610 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-22T15:24:42,610 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1535 2024-11-22T15:24:42,610 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4605 rows 2024-11-22T15:24:42,610 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1531 2024-11-22T15:24:42,610 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4593 rows 2024-11-22T15:24:42,610 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1529 2024-11-22T15:24:42,610 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4587 rows 2024-11-22T15:24:42,610 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1519 2024-11-22T15:24:42,610 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4557 rows 2024-11-22T15:24:42,610 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1541 2024-11-22T15:24:42,610 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4623 rows 2024-11-22T15:24:42,610 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-22T15:24:42,610 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x301741f1 to 127.0.0.1:52970 2024-11-22T15:24:42,610 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:24:42,613 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-22T15:24:42,613 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-22T15:24:42,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=150, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-22T15:24:42,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-22T15:24:42,617 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732289082616"}]},"ts":"1732289082616"} 2024-11-22T15:24:42,618 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-22T15:24:42,674 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-22T15:24:42,675 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-22T15:24:42,677 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ed44e89acb87ffee72f4c7902667e851, UNASSIGN}] 2024-11-22T15:24:42,677 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ed44e89acb87ffee72f4c7902667e851, UNASSIGN 2024-11-22T15:24:42,678 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=152 updating hbase:meta row=ed44e89acb87ffee72f4c7902667e851, regionState=CLOSING, regionLocation=77927f992d0b,36033,1732288915809 2024-11-22T15:24:42,679 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-22T15:24:42,679 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=153, ppid=152, state=RUNNABLE; CloseRegionProcedure ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809}] 2024-11-22T15:24:42,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-22T15:24:42,830 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:42,830 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] handler.UnassignRegionHandler(124): Close ed44e89acb87ffee72f4c7902667e851 2024-11-22T15:24:42,831 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-22T15:24:42,831 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1681): Closing ed44e89acb87ffee72f4c7902667e851, disabling compactions & flushes 2024-11-22T15:24:42,831 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:42,831 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 2024-11-22T15:24:42,831 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. after waiting 0 ms 2024-11-22T15:24:42,831 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 
2024-11-22T15:24:42,831 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(2837): Flushing ed44e89acb87ffee72f4c7902667e851 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-22T15:24:42,831 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=A 2024-11-22T15:24:42,831 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:42,831 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=B 2024-11-22T15:24:42,831 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:42,831 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ed44e89acb87ffee72f4c7902667e851, store=C 2024-11-22T15:24:42,831 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:42,835 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/4754d36c8c334128a51b14d1017b4bad is 50, key is test_row_0/A:col10/1732289075237/Put/seqid=0 2024-11-22T15:24:42,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742516_1692 (size=12301) 2024-11-22T15:24:42,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-22T15:24:43,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-22T15:24:43,238 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=618 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/4754d36c8c334128a51b14d1017b4bad 2024-11-22T15:24:43,243 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/f233b16ac7fd48fa91df64fb70a4d61a is 50, key is test_row_0/B:col10/1732289075237/Put/seqid=0 2024-11-22T15:24:43,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742517_1693 (size=12301) 2024-11-22T15:24:43,646 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 
{event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=618 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/f233b16ac7fd48fa91df64fb70a4d61a 2024-11-22T15:24:43,651 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/c02d2620cc894180bb7081df8ac870c5 is 50, key is test_row_0/C:col10/1732289075237/Put/seqid=0 2024-11-22T15:24:43,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742518_1694 (size=12301) 2024-11-22T15:24:43,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-22T15:24:44,054 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=618 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/c02d2620cc894180bb7081df8ac870c5 2024-11-22T15:24:44,059 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/A/4754d36c8c334128a51b14d1017b4bad as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/4754d36c8c334128a51b14d1017b4bad 2024-11-22T15:24:44,063 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/4754d36c8c334128a51b14d1017b4bad, entries=150, sequenceid=618, filesize=12.0 K 2024-11-22T15:24:44,064 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/B/f233b16ac7fd48fa91df64fb70a4d61a as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/f233b16ac7fd48fa91df64fb70a4d61a 2024-11-22T15:24:44,069 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/f233b16ac7fd48fa91df64fb70a4d61a, entries=150, sequenceid=618, filesize=12.0 K 2024-11-22T15:24:44,070 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/.tmp/C/c02d2620cc894180bb7081df8ac870c5 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/c02d2620cc894180bb7081df8ac870c5 2024-11-22T15:24:44,073 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/c02d2620cc894180bb7081df8ac870c5, entries=150, sequenceid=618, filesize=12.0 K 2024-11-22T15:24:44,074 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for ed44e89acb87ffee72f4c7902667e851 in 1243ms, sequenceid=618, compaction requested=true 2024-11-22T15:24:44,075 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/c99f64c4c72549448cf6f01da8e45ca6, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/0abad8afff374a02ba9e4e0dca683fab, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/68b4affc247148819f809cf4ad5917d5, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/a3e996a1cb524298bfe48dfad09a1fac, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/139c6eff4860440892562b5638c087d4, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/cf8aa59544c5494c9213c938cfdc3957, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/d73ea038e77a4b8db4f5a7407d5bbed3, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/28c6b400ca75493291b518ef55e726fd, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/5fc8ba1542404371baef55275918ddc1, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/17c66d7f70d34b529a64c341b49f9443, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/1e97f0c27b614fd6812fbfc5fdccbd2f, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/12a5cf6595de422bb29581d3a582eb82, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/0739e25e5d72436182332f9e6ee10438, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/12b73b2b68f3430fb455b983a9df4f39, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/2829fd5f50774e7dbb71836cfbf959ac, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/cf5ff1f8f05b4cdaacf68f3fc59ca233, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/eb978b6cebeb47f29957ef518ded005a, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/067b4110d8a149c494c6be3ad1357ca8, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/693c4767a7014770afcac4a6f137acb4, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/dbb32d55ff004f0f8e5d809b933674fe, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/d3caa6275b6b4cd8a82fe5c5e9b367f7, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/aa92b8c8a0cf44f694dd9c6b36c675b9, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/f423b6dc354f47bd94d1790e01b8439e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/afba40a0b51e46d49f0d820f69675555, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/250c839f44e749a09716cfbac0f83096, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/822ee1c78e2244228d88de8de0f4481c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/0719449ff7a54984b655f4cea7699a5d, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/9fd60e78a3244509989d3853d6ec97d7, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/13f5ebc9be534cdab9951bb02b289b6d, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/dcb6c72f031d460fa1c42b126968f66e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/1289b7ff4b5342d6bea091e8db7eae0c, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/97ad8fc80ae145b3b08b24a65b7a8f95, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/4060fdeec83a46e08ec1a8d06108ec2a, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/11f871ebce3847948f80970f5fde2185, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/6e04054c6e5c477f814c6d14f2a5b5f2, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/08784acf83ab4403bf6b0656603a1159, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/ced0162790ee41cdb4155dfcfa541046, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/5392aa836ac745fb8f9ac374b44c5a12, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/b59136dd0f9d4b4f9355d03a07b34020, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/cb2bc6dca5134b3c916683d4ed11a9b2, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/a56c49df57f3478b852ea00feaa3a10f, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/315a02762d5c47af9dd9bc290af19ad2] to archive 2024-11-22T15:24:44,076 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-22T15:24:44,077 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/c99f64c4c72549448cf6f01da8e45ca6 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/c99f64c4c72549448cf6f01da8e45ca6 2024-11-22T15:24:44,079 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/0abad8afff374a02ba9e4e0dca683fab to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/0abad8afff374a02ba9e4e0dca683fab 2024-11-22T15:24:44,080 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/68b4affc247148819f809cf4ad5917d5 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/68b4affc247148819f809cf4ad5917d5 2024-11-22T15:24:44,082 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/a3e996a1cb524298bfe48dfad09a1fac to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/a3e996a1cb524298bfe48dfad09a1fac 2024-11-22T15:24:44,083 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/139c6eff4860440892562b5638c087d4 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/139c6eff4860440892562b5638c087d4 2024-11-22T15:24:44,085 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/cf8aa59544c5494c9213c938cfdc3957 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/cf8aa59544c5494c9213c938cfdc3957 2024-11-22T15:24:44,086 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/d73ea038e77a4b8db4f5a7407d5bbed3 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/d73ea038e77a4b8db4f5a7407d5bbed3 2024-11-22T15:24:44,088 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/28c6b400ca75493291b518ef55e726fd to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/28c6b400ca75493291b518ef55e726fd 2024-11-22T15:24:44,090 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/5fc8ba1542404371baef55275918ddc1 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/5fc8ba1542404371baef55275918ddc1 2024-11-22T15:24:44,091 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/17c66d7f70d34b529a64c341b49f9443 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/17c66d7f70d34b529a64c341b49f9443 2024-11-22T15:24:44,093 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/1e97f0c27b614fd6812fbfc5fdccbd2f to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/1e97f0c27b614fd6812fbfc5fdccbd2f 2024-11-22T15:24:44,094 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/12a5cf6595de422bb29581d3a582eb82 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/12a5cf6595de422bb29581d3a582eb82 2024-11-22T15:24:44,096 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/0739e25e5d72436182332f9e6ee10438 to 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/0739e25e5d72436182332f9e6ee10438 2024-11-22T15:24:44,097 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/12b73b2b68f3430fb455b983a9df4f39 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/12b73b2b68f3430fb455b983a9df4f39 2024-11-22T15:24:44,099 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/2829fd5f50774e7dbb71836cfbf959ac to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/2829fd5f50774e7dbb71836cfbf959ac 2024-11-22T15:24:44,100 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/cf5ff1f8f05b4cdaacf68f3fc59ca233 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/cf5ff1f8f05b4cdaacf68f3fc59ca233 2024-11-22T15:24:44,102 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/eb978b6cebeb47f29957ef518ded005a to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/eb978b6cebeb47f29957ef518ded005a 2024-11-22T15:24:44,104 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/067b4110d8a149c494c6be3ad1357ca8 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/067b4110d8a149c494c6be3ad1357ca8 2024-11-22T15:24:44,105 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/693c4767a7014770afcac4a6f137acb4 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/693c4767a7014770afcac4a6f137acb4 2024-11-22T15:24:44,107 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/dbb32d55ff004f0f8e5d809b933674fe to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/dbb32d55ff004f0f8e5d809b933674fe 2024-11-22T15:24:44,109 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/d3caa6275b6b4cd8a82fe5c5e9b367f7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/d3caa6275b6b4cd8a82fe5c5e9b367f7 2024-11-22T15:24:44,111 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/aa92b8c8a0cf44f694dd9c6b36c675b9 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/aa92b8c8a0cf44f694dd9c6b36c675b9 2024-11-22T15:24:44,113 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/f423b6dc354f47bd94d1790e01b8439e to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/f423b6dc354f47bd94d1790e01b8439e 2024-11-22T15:24:44,114 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/afba40a0b51e46d49f0d820f69675555 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/afba40a0b51e46d49f0d820f69675555 2024-11-22T15:24:44,116 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/250c839f44e749a09716cfbac0f83096 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/250c839f44e749a09716cfbac0f83096 2024-11-22T15:24:44,118 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/822ee1c78e2244228d88de8de0f4481c to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/822ee1c78e2244228d88de8de0f4481c 2024-11-22T15:24:44,120 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/0719449ff7a54984b655f4cea7699a5d to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/0719449ff7a54984b655f4cea7699a5d 2024-11-22T15:24:44,122 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/9fd60e78a3244509989d3853d6ec97d7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/9fd60e78a3244509989d3853d6ec97d7 2024-11-22T15:24:44,124 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/13f5ebc9be534cdab9951bb02b289b6d to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/13f5ebc9be534cdab9951bb02b289b6d 2024-11-22T15:24:44,126 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/dcb6c72f031d460fa1c42b126968f66e to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/dcb6c72f031d460fa1c42b126968f66e 2024-11-22T15:24:44,128 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/1289b7ff4b5342d6bea091e8db7eae0c to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/1289b7ff4b5342d6bea091e8db7eae0c 2024-11-22T15:24:44,130 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/97ad8fc80ae145b3b08b24a65b7a8f95 to 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/97ad8fc80ae145b3b08b24a65b7a8f95 2024-11-22T15:24:44,132 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/4060fdeec83a46e08ec1a8d06108ec2a to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/4060fdeec83a46e08ec1a8d06108ec2a 2024-11-22T15:24:44,134 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/11f871ebce3847948f80970f5fde2185 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/11f871ebce3847948f80970f5fde2185 2024-11-22T15:24:44,135 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/6e04054c6e5c477f814c6d14f2a5b5f2 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/6e04054c6e5c477f814c6d14f2a5b5f2 2024-11-22T15:24:44,137 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/08784acf83ab4403bf6b0656603a1159 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/08784acf83ab4403bf6b0656603a1159 2024-11-22T15:24:44,139 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/ced0162790ee41cdb4155dfcfa541046 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/ced0162790ee41cdb4155dfcfa541046 2024-11-22T15:24:44,141 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/5392aa836ac745fb8f9ac374b44c5a12 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/5392aa836ac745fb8f9ac374b44c5a12 2024-11-22T15:24:44,142 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/b59136dd0f9d4b4f9355d03a07b34020 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/b59136dd0f9d4b4f9355d03a07b34020 2024-11-22T15:24:44,144 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/cb2bc6dca5134b3c916683d4ed11a9b2 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/cb2bc6dca5134b3c916683d4ed11a9b2 2024-11-22T15:24:44,146 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/a56c49df57f3478b852ea00feaa3a10f to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/a56c49df57f3478b852ea00feaa3a10f 2024-11-22T15:24:44,148 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/315a02762d5c47af9dd9bc290af19ad2 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/315a02762d5c47af9dd9bc290af19ad2 2024-11-22T15:24:44,150 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/730ea2680882415d9aaf5a383137fbb6, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/c0b105654a6042f8a736c7bfbaa5e208, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/ccced7bbc5a947689f511f1dc25cb5e3, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/a94976aa10b640cca2d897dc5ba0f220, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/e45f5588a0e24342907f275f8818badf, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/678e2c356a274cc5ba19bbe9f5781e12, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/162412ee005f4050bd62bcdf49e107de, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/37e204bec45e4342af1a6ee4ef4f1522, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/094a45bc21d44feca29faf5a95812ba0, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/762482e199d34201aa92bc171db22ef0, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/7525b018f71f4cfab94babb1cf548031, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/a37e6af2631f42128e922bb7d92206a7, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/22b4f1413eea4fd3961515d794a9d128, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/b7338a095a544fe7ba0d797a87c4c47c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/4084b3a3a3aa48ab84eccd1824293a06, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/f7cc5715e9ae4a348c84d6e589bc6636, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/e4cada4bfd9a4ff0aa71a3fe32aa3d82, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/3cc02ecb669e49f8802d43d00c054fe6, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/a9a26e26cde84fd7a0d240a85d48c98d, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/42cfd2ebbf0b470aaa06aa630eba3c00, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/8746eb853b07408d8b49547bfba4c0a0, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/8a0ceca5ce7d49f7bc60ebf71cb4bc95, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/131414e19f5d4a4a86b7bd98d2d0d0e3, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/a8f3ced3bda44c4faa16d896ba19b267, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/5240c96346244b5fa7edd7fb8c3abe37, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/bbadeaf50ab44844a4a0a5899f9b843b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/94e9f3a7861746b6816ddd6841c653d8, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/28c3e036d8784e4ab7a39d5c9b7fd73c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/389d3bf5d1f8450f984335917a4b8ce4, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/90948a61eb05443fb63f0e284b4261d9, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/27003832ca1e470ba6679d61c400a582, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/6d20a38735a94e42be95384a6cd24508, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/6e4f69d5c3d64ac1ab263b8dbf22146b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/f2161e6c85944c86866cbbe0f598f8e9, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/753003fd3f9f4c6e8e02501355c89ee3, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/4bc436a0b2d6477bbf7cd87e44c137f0, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/9b67f798c8914d40ac187e57d123d413, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/05ebe61fa4644215adb5fe0bd252a74c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/755bdb780f55400fa13e24435b823414, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/0560519cede946b184cd71c79674ba21, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/053caa87454c494cb8aa4e0adc221229, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/126aab51361842c595111f8a92f7e60f] to archive 2024-11-22T15:24:44,152 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-22T15:24:44,216 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/730ea2680882415d9aaf5a383137fbb6 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/730ea2680882415d9aaf5a383137fbb6 2024-11-22T15:24:44,219 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/c0b105654a6042f8a736c7bfbaa5e208 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/c0b105654a6042f8a736c7bfbaa5e208 2024-11-22T15:24:44,221 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/ccced7bbc5a947689f511f1dc25cb5e3 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/ccced7bbc5a947689f511f1dc25cb5e3 2024-11-22T15:24:44,224 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/a94976aa10b640cca2d897dc5ba0f220 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/a94976aa10b640cca2d897dc5ba0f220 2024-11-22T15:24:44,226 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/e45f5588a0e24342907f275f8818badf to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/e45f5588a0e24342907f275f8818badf 2024-11-22T15:24:44,229 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/678e2c356a274cc5ba19bbe9f5781e12 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/678e2c356a274cc5ba19bbe9f5781e12 2024-11-22T15:24:44,231 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/162412ee005f4050bd62bcdf49e107de to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/162412ee005f4050bd62bcdf49e107de 2024-11-22T15:24:44,233 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/37e204bec45e4342af1a6ee4ef4f1522 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/37e204bec45e4342af1a6ee4ef4f1522 2024-11-22T15:24:44,235 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/094a45bc21d44feca29faf5a95812ba0 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/094a45bc21d44feca29faf5a95812ba0 2024-11-22T15:24:44,237 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/762482e199d34201aa92bc171db22ef0 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/762482e199d34201aa92bc171db22ef0 2024-11-22T15:24:44,239 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/7525b018f71f4cfab94babb1cf548031 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/7525b018f71f4cfab94babb1cf548031 2024-11-22T15:24:44,240 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/a37e6af2631f42128e922bb7d92206a7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/a37e6af2631f42128e922bb7d92206a7 2024-11-22T15:24:44,242 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/22b4f1413eea4fd3961515d794a9d128 to 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/22b4f1413eea4fd3961515d794a9d128 2024-11-22T15:24:44,244 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/b7338a095a544fe7ba0d797a87c4c47c to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/b7338a095a544fe7ba0d797a87c4c47c 2024-11-22T15:24:44,246 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/4084b3a3a3aa48ab84eccd1824293a06 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/4084b3a3a3aa48ab84eccd1824293a06 2024-11-22T15:24:44,248 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/f7cc5715e9ae4a348c84d6e589bc6636 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/f7cc5715e9ae4a348c84d6e589bc6636 2024-11-22T15:24:44,249 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/e4cada4bfd9a4ff0aa71a3fe32aa3d82 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/e4cada4bfd9a4ff0aa71a3fe32aa3d82 2024-11-22T15:24:44,250 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/3cc02ecb669e49f8802d43d00c054fe6 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/3cc02ecb669e49f8802d43d00c054fe6 2024-11-22T15:24:44,251 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/a9a26e26cde84fd7a0d240a85d48c98d to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/a9a26e26cde84fd7a0d240a85d48c98d 2024-11-22T15:24:44,253 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/42cfd2ebbf0b470aaa06aa630eba3c00 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/42cfd2ebbf0b470aaa06aa630eba3c00 2024-11-22T15:24:44,254 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/8746eb853b07408d8b49547bfba4c0a0 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/8746eb853b07408d8b49547bfba4c0a0 2024-11-22T15:24:44,255 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/8a0ceca5ce7d49f7bc60ebf71cb4bc95 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/8a0ceca5ce7d49f7bc60ebf71cb4bc95 2024-11-22T15:24:44,257 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/131414e19f5d4a4a86b7bd98d2d0d0e3 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/131414e19f5d4a4a86b7bd98d2d0d0e3 2024-11-22T15:24:44,258 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/a8f3ced3bda44c4faa16d896ba19b267 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/a8f3ced3bda44c4faa16d896ba19b267 2024-11-22T15:24:44,259 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/5240c96346244b5fa7edd7fb8c3abe37 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/5240c96346244b5fa7edd7fb8c3abe37 2024-11-22T15:24:44,260 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/bbadeaf50ab44844a4a0a5899f9b843b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/bbadeaf50ab44844a4a0a5899f9b843b 2024-11-22T15:24:44,260 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/94e9f3a7861746b6816ddd6841c653d8 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/94e9f3a7861746b6816ddd6841c653d8 2024-11-22T15:24:44,261 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/28c3e036d8784e4ab7a39d5c9b7fd73c to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/28c3e036d8784e4ab7a39d5c9b7fd73c 2024-11-22T15:24:44,262 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/389d3bf5d1f8450f984335917a4b8ce4 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/389d3bf5d1f8450f984335917a4b8ce4 2024-11-22T15:24:44,262 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/90948a61eb05443fb63f0e284b4261d9 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/90948a61eb05443fb63f0e284b4261d9 2024-11-22T15:24:44,263 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/27003832ca1e470ba6679d61c400a582 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/27003832ca1e470ba6679d61c400a582 2024-11-22T15:24:44,264 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/6d20a38735a94e42be95384a6cd24508 to 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/6d20a38735a94e42be95384a6cd24508 2024-11-22T15:24:44,264 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/6e4f69d5c3d64ac1ab263b8dbf22146b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/6e4f69d5c3d64ac1ab263b8dbf22146b 2024-11-22T15:24:44,265 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/f2161e6c85944c86866cbbe0f598f8e9 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/f2161e6c85944c86866cbbe0f598f8e9 2024-11-22T15:24:44,265 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/753003fd3f9f4c6e8e02501355c89ee3 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/753003fd3f9f4c6e8e02501355c89ee3 2024-11-22T15:24:44,266 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/4bc436a0b2d6477bbf7cd87e44c137f0 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/4bc436a0b2d6477bbf7cd87e44c137f0 2024-11-22T15:24:44,267 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/9b67f798c8914d40ac187e57d123d413 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/9b67f798c8914d40ac187e57d123d413 2024-11-22T15:24:44,267 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/05ebe61fa4644215adb5fe0bd252a74c to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/05ebe61fa4644215adb5fe0bd252a74c 2024-11-22T15:24:44,268 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/755bdb780f55400fa13e24435b823414 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/755bdb780f55400fa13e24435b823414 2024-11-22T15:24:44,269 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/0560519cede946b184cd71c79674ba21 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/0560519cede946b184cd71c79674ba21 2024-11-22T15:24:44,269 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/053caa87454c494cb8aa4e0adc221229 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/053caa87454c494cb8aa4e0adc221229 2024-11-22T15:24:44,270 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/126aab51361842c595111f8a92f7e60f to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/126aab51361842c595111f8a92f7e60f 2024-11-22T15:24:44,272 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/9f8dcf82581b4fb281262fda41374a35, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/a78c87c84cd34aec8f9c0ed75ab9d923, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/bc12ee2135ba4f58bdc14283f26cb37b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/feea95b80ed14a159d59b792ca3d9223, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/12fc72e6ee604b6a9ca3e3bb53856991, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/0f48028a913f4bfb920eddac6b8f8122, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/70b31ad5aca14501a72592af90972a8c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/5dc3a98dc97b4a41a1641f1dfb072bc7, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/ff8443efcd3240fcb7f8c10cbcfa6474, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/df537eec9935473ea63fd39ba7109178, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/a8453c47a7324983a399da7d7b12efeb, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/9ee2c831366945c2bfcf24e6c8381db5, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/9d23b1c14b1447fb846c7cb3f98cbdfa, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/5b8f070dc3d444129c055b9d28c6b099, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/c4e2c80981a84e3b926369b5dbef7f17, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/dca4f86853be4e4fb8ed06909a67aeed, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/6cdd2e013d094bb0abaadb488601c1e2, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/91446375bc5b48fbbad794c8a7ddee91, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/4b9c2ef9b8c0419585dfe870625d5f05, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/13bc304af6fb46b182d4cffab874fb37, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/1b2c0e8321b34c93aa0f56bcd22ad983, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/54eaf4373b764e40bdd2c83d30f0f4d4, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/56a95d91224246aca8eab940acac3ff7, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/20e31522cf07400cb871e40864ac4e28, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/7bd6f68435fd44b6b12753d15884c77f, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/3e8e1d3296b34f79b671aacce28f677c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/ea139240038b45eb832fb0e83914143d, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/334fedd0e96b49b296d164e2ef056504, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/845fb15e6518464c8b73c577a77a799b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/6e6b9d8737f542d9b36b63bdc1ac65a9, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/36eb7ddd29184f468cbf020bf16a76f0, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/fa4696a78d6f4492a4ce472a7941ffb3, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/c6f4a50a410e46cca7c2508a42493650, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/2ec88f6dfcb24880889ba4df27b226dd, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/23b4ef004d30451085c2429eb22019c8, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/fc310245a9664dd49c2824f507fa917c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/fdf8b08e1dba41e19ccc1a283a36f99a, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/583314516354450cb75ac7df7fe863e4, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/51f16cdce9cd4c3f8994003a5d62ee30, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/1bfcba53edbb4513877a71da009f7777, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/a31b8cbae7874038810962f77939f498] to archive 2024-11-22T15:24:44,272 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
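The entries above show HStore handing the lists of compacted store files (families B and C of region ed44e89acb87ffee72f4c7902667e851) to HFileArchiver, which then relocates each file from the region's data directory to the mirrored path under archive/. A minimal sketch of that path mapping, using only what is visible in these log lines; the helper below is hypothetical and not part of HBase:

// Illustrative only: derive the archive location of a store file from the
// layout visible in this log (data/default/... is mirrored under archive/data/default/...).
public final class ArchivePathExample {
    // Hypothetical helper, not an HBase API.
    static String toArchivePath(String storeFilePath) {
        return storeFilePath.replaceFirst("/data/default/", "/archive/data/default/");
    }

    public static void main(String[] args) {
        String src = "hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690"
                + "/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/9f8dcf82581b4fb281262fda41374a35";
        // Prints the same archive destination that HFileArchiver logs for this file below.
        System.out.println(toArchivePath(src));
    }
}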
2024-11-22T15:24:44,274 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/9f8dcf82581b4fb281262fda41374a35 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/9f8dcf82581b4fb281262fda41374a35 2024-11-22T15:24:44,275 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/a78c87c84cd34aec8f9c0ed75ab9d923 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/a78c87c84cd34aec8f9c0ed75ab9d923 2024-11-22T15:24:44,276 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/bc12ee2135ba4f58bdc14283f26cb37b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/bc12ee2135ba4f58bdc14283f26cb37b 2024-11-22T15:24:44,276 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/feea95b80ed14a159d59b792ca3d9223 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/feea95b80ed14a159d59b792ca3d9223 2024-11-22T15:24:44,277 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/12fc72e6ee604b6a9ca3e3bb53856991 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/12fc72e6ee604b6a9ca3e3bb53856991 2024-11-22T15:24:44,278 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/0f48028a913f4bfb920eddac6b8f8122 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/0f48028a913f4bfb920eddac6b8f8122 2024-11-22T15:24:44,279 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/70b31ad5aca14501a72592af90972a8c to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/70b31ad5aca14501a72592af90972a8c 2024-11-22T15:24:44,280 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/5dc3a98dc97b4a41a1641f1dfb072bc7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/5dc3a98dc97b4a41a1641f1dfb072bc7 2024-11-22T15:24:44,281 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/ff8443efcd3240fcb7f8c10cbcfa6474 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/ff8443efcd3240fcb7f8c10cbcfa6474 2024-11-22T15:24:44,282 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/df537eec9935473ea63fd39ba7109178 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/df537eec9935473ea63fd39ba7109178 2024-11-22T15:24:44,282 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/a8453c47a7324983a399da7d7b12efeb to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/a8453c47a7324983a399da7d7b12efeb 2024-11-22T15:24:44,283 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/9ee2c831366945c2bfcf24e6c8381db5 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/9ee2c831366945c2bfcf24e6c8381db5 2024-11-22T15:24:44,284 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/9d23b1c14b1447fb846c7cb3f98cbdfa to 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/9d23b1c14b1447fb846c7cb3f98cbdfa 2024-11-22T15:24:44,285 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/5b8f070dc3d444129c055b9d28c6b099 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/5b8f070dc3d444129c055b9d28c6b099 2024-11-22T15:24:44,286 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/c4e2c80981a84e3b926369b5dbef7f17 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/c4e2c80981a84e3b926369b5dbef7f17 2024-11-22T15:24:44,287 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/dca4f86853be4e4fb8ed06909a67aeed to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/dca4f86853be4e4fb8ed06909a67aeed 2024-11-22T15:24:44,288 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/6cdd2e013d094bb0abaadb488601c1e2 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/6cdd2e013d094bb0abaadb488601c1e2 2024-11-22T15:24:44,289 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/91446375bc5b48fbbad794c8a7ddee91 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/91446375bc5b48fbbad794c8a7ddee91 2024-11-22T15:24:44,291 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/4b9c2ef9b8c0419585dfe870625d5f05 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/4b9c2ef9b8c0419585dfe870625d5f05 2024-11-22T15:24:44,292 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/13bc304af6fb46b182d4cffab874fb37 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/13bc304af6fb46b182d4cffab874fb37 2024-11-22T15:24:44,293 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/1b2c0e8321b34c93aa0f56bcd22ad983 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/1b2c0e8321b34c93aa0f56bcd22ad983 2024-11-22T15:24:44,294 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/54eaf4373b764e40bdd2c83d30f0f4d4 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/54eaf4373b764e40bdd2c83d30f0f4d4 2024-11-22T15:24:44,295 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/56a95d91224246aca8eab940acac3ff7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/56a95d91224246aca8eab940acac3ff7 2024-11-22T15:24:44,296 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/20e31522cf07400cb871e40864ac4e28 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/20e31522cf07400cb871e40864ac4e28 2024-11-22T15:24:44,297 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/7bd6f68435fd44b6b12753d15884c77f to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/7bd6f68435fd44b6b12753d15884c77f 2024-11-22T15:24:44,298 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/3e8e1d3296b34f79b671aacce28f677c to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/3e8e1d3296b34f79b671aacce28f677c 2024-11-22T15:24:44,300 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/ea139240038b45eb832fb0e83914143d to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/ea139240038b45eb832fb0e83914143d 2024-11-22T15:24:44,301 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/334fedd0e96b49b296d164e2ef056504 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/334fedd0e96b49b296d164e2ef056504 2024-11-22T15:24:44,302 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/845fb15e6518464c8b73c577a77a799b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/845fb15e6518464c8b73c577a77a799b 2024-11-22T15:24:44,303 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/6e6b9d8737f542d9b36b63bdc1ac65a9 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/6e6b9d8737f542d9b36b63bdc1ac65a9 2024-11-22T15:24:44,304 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/36eb7ddd29184f468cbf020bf16a76f0 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/36eb7ddd29184f468cbf020bf16a76f0 2024-11-22T15:24:44,305 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/fa4696a78d6f4492a4ce472a7941ffb3 to 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/fa4696a78d6f4492a4ce472a7941ffb3 2024-11-22T15:24:44,307 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/c6f4a50a410e46cca7c2508a42493650 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/c6f4a50a410e46cca7c2508a42493650 2024-11-22T15:24:44,308 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/2ec88f6dfcb24880889ba4df27b226dd to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/2ec88f6dfcb24880889ba4df27b226dd 2024-11-22T15:24:44,310 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/23b4ef004d30451085c2429eb22019c8 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/23b4ef004d30451085c2429eb22019c8 2024-11-22T15:24:44,311 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/fc310245a9664dd49c2824f507fa917c to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/fc310245a9664dd49c2824f507fa917c 2024-11-22T15:24:44,313 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/fdf8b08e1dba41e19ccc1a283a36f99a to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/fdf8b08e1dba41e19ccc1a283a36f99a 2024-11-22T15:24:44,314 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/583314516354450cb75ac7df7fe863e4 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/583314516354450cb75ac7df7fe863e4 2024-11-22T15:24:44,316 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/51f16cdce9cd4c3f8994003a5d62ee30 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/51f16cdce9cd4c3f8994003a5d62ee30 2024-11-22T15:24:44,318 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/1bfcba53edbb4513877a71da009f7777 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/1bfcba53edbb4513877a71da009f7777 2024-11-22T15:24:44,319 DEBUG [StoreCloser-TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/a31b8cbae7874038810962f77939f498 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/a31b8cbae7874038810962f77939f498 2024-11-22T15:24:44,323 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/recovered.edits/621.seqid, newMaxSeqId=621, maxSeqId=1 2024-11-22T15:24:44,323 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851. 
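Just before the region closes, WALSplitUtil records the region's highest applied sequence id in a marker file named 621.seqid under recovered.edits (newMaxSeqId=621 in the entry above). The sketch below only illustrates that file-name convention as it appears in this log; it is a hypothetical parser, not HBase code:

import java.util.List;

public final class SeqIdMarkerExample {
    // Hypothetical: recover the max sequence id from "<n>.seqid" marker file names.
    static long maxSeqIdFromMarkers(List<String> fileNames) {
        long max = -1L;
        for (String name : fileNames) {
            if (name.endsWith(".seqid")) {
                max = Math.max(max, Long.parseLong(name.substring(0, name.length() - ".seqid".length())));
            }
        }
        return max;
    }

    public static void main(String[] args) {
        System.out.println(maxSeqIdFromMarkers(List.of("621.seqid"))); // 621
    }
}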
2024-11-22T15:24:44,323 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1635): Region close journal for ed44e89acb87ffee72f4c7902667e851: 2024-11-22T15:24:44,324 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] handler.UnassignRegionHandler(170): Closed ed44e89acb87ffee72f4c7902667e851 2024-11-22T15:24:44,325 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=152 updating hbase:meta row=ed44e89acb87ffee72f4c7902667e851, regionState=CLOSED 2024-11-22T15:24:44,326 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=153, resume processing ppid=152 2024-11-22T15:24:44,326 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, ppid=152, state=SUCCESS; CloseRegionProcedure ed44e89acb87ffee72f4c7902667e851, server=77927f992d0b,36033,1732288915809 in 1.6460 sec 2024-11-22T15:24:44,327 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=152, resume processing ppid=151 2024-11-22T15:24:44,327 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, ppid=151, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=ed44e89acb87ffee72f4c7902667e851, UNASSIGN in 1.6500 sec 2024-11-22T15:24:44,328 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=151, resume processing ppid=150 2024-11-22T15:24:44,328 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, ppid=150, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.6520 sec 2024-11-22T15:24:44,329 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732289084329"}]},"ts":"1732289084329"} 2024-11-22T15:24:44,329 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-22T15:24:44,391 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-22T15:24:44,393 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.7780 sec 2024-11-22T15:24:44,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-22T15:24:44,721 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 150 completed 2024-11-22T15:24:44,722 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-22T15:24:44,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=154, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T15:24:44,723 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=154, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T15:24:44,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-11-22T15:24:44,724 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=154, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T15:24:44,725 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851 2024-11-22T15:24:44,727 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A, FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B, FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C, FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/recovered.edits] 2024-11-22T15:24:44,729 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/4754d36c8c334128a51b14d1017b4bad to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/4754d36c8c334128a51b14d1017b4bad 2024-11-22T15:24:44,730 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/60c3df1b627d4bcd86e0117951ade686 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/60c3df1b627d4bcd86e0117951ade686 2024-11-22T15:24:44,731 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/c11c6ee50dce430eaf36153deae912a6 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/A/c11c6ee50dce430eaf36153deae912a6 2024-11-22T15:24:44,732 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/1bd5a1d5a0444f16ba4a1c94ec27a9ce to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/1bd5a1d5a0444f16ba4a1c94ec27a9ce 2024-11-22T15:24:44,733 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/f233b16ac7fd48fa91df64fb70a4d61a to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/f233b16ac7fd48fa91df64fb70a4d61a 
2024-11-22T15:24:44,734 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/fde39dc2c3a24430bee2e3269371c4e7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/B/fde39dc2c3a24430bee2e3269371c4e7 2024-11-22T15:24:44,735 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/89347dab75e1403082bfd99b873091ac to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/89347dab75e1403082bfd99b873091ac 2024-11-22T15:24:44,736 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/c02d2620cc894180bb7081df8ac870c5 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/c02d2620cc894180bb7081df8ac870c5 2024-11-22T15:24:44,736 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/db2b120588fb4ef0a8a4cdd66674d533 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/C/db2b120588fb4ef0a8a4cdd66674d533 2024-11-22T15:24:44,738 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/recovered.edits/621.seqid to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851/recovered.edits/621.seqid 2024-11-22T15:24:44,738 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/ed44e89acb87ffee72f4c7902667e851 2024-11-22T15:24:44,738 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-22T15:24:44,740 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=154, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T15:24:44,741 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-22T15:24:44,742 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 
2024-11-22T15:24:44,743 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=154, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T15:24:44,743 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-22T15:24:44,743 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732289084743"}]},"ts":"9223372036854775807"} 2024-11-22T15:24:44,744 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-22T15:24:44,744 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => ed44e89acb87ffee72f4c7902667e851, NAME => 'TestAcidGuarantees,,1732289052948.ed44e89acb87ffee72f4c7902667e851.', STARTKEY => '', ENDKEY => ''}] 2024-11-22T15:24:44,744 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-22T15:24:44,744 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732289084744"}]},"ts":"9223372036854775807"} 2024-11-22T15:24:44,745 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-22T15:24:44,758 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=154, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T15:24:44,759 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 36 msec 2024-11-22T15:24:44,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-11-22T15:24:44,824 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 154 completed 2024-11-22T15:24:44,832 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=237 (was 242), OpenFileDescriptor=449 (was 460), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=691 (was 679) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=5219 (was 4055) - AvailableMemoryMB LEAK? - 2024-11-22T15:24:44,840 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=237, OpenFileDescriptor=449, MaxFileDescriptor=1048576, SystemLoadAverage=691, ProcessCount=11, AvailableMemoryMB=5219 2024-11-22T15:24:44,841 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-11-22T15:24:44,842 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T15:24:44,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=155, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-22T15:24:44,843 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=155, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T15:24:44,843 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:44,843 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 155 2024-11-22T15:24:44,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-22T15:24:44,843 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=155, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T15:24:44,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742519_1695 (size=963) 2024-11-22T15:24:44,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-22T15:24:45,108 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-22T15:24:45,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-22T15:24:45,250 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''}, 
tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690 2024-11-22T15:24:45,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742520_1696 (size=53) 2024-11-22T15:24:45,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-22T15:24:45,659 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T15:24:45,659 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 33885ab322a31541d17da102b047512b, disabling compactions & flushes 2024-11-22T15:24:45,659 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:45,659 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:45,659 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. after waiting 0 ms 2024-11-22T15:24:45,659 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:45,659 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 
2024-11-22T15:24:45,659 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 33885ab322a31541d17da102b047512b: 2024-11-22T15:24:45,661 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=155, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T15:24:45,661 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732289085661"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732289085661"}]},"ts":"1732289085661"} 2024-11-22T15:24:45,663 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-22T15:24:45,665 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=155, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T15:24:45,665 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732289085665"}]},"ts":"1732289085665"} 2024-11-22T15:24:45,667 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-22T15:24:45,766 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=33885ab322a31541d17da102b047512b, ASSIGN}] 2024-11-22T15:24:45,767 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=156, ppid=155, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=33885ab322a31541d17da102b047512b, ASSIGN 2024-11-22T15:24:45,769 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=156, ppid=155, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=33885ab322a31541d17da102b047512b, ASSIGN; state=OFFLINE, location=77927f992d0b,36033,1732288915809; forceNewPlan=false, retain=false 2024-11-22T15:24:45,919 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=156 updating hbase:meta row=33885ab322a31541d17da102b047512b, regionState=OPENING, regionLocation=77927f992d0b,36033,1732288915809 2024-11-22T15:24:45,921 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=157, ppid=156, state=RUNNABLE; OpenRegionProcedure 33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809}] 2024-11-22T15:24:45,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-22T15:24:46,073 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:46,075 INFO [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 
2024-11-22T15:24:46,075 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(7285): Opening region: {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} 2024-11-22T15:24:46,075 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 33885ab322a31541d17da102b047512b 2024-11-22T15:24:46,075 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T15:24:46,075 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(7327): checking encryption for 33885ab322a31541d17da102b047512b 2024-11-22T15:24:46,075 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(7330): checking classloading for 33885ab322a31541d17da102b047512b 2024-11-22T15:24:46,076 INFO [StoreOpener-33885ab322a31541d17da102b047512b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 33885ab322a31541d17da102b047512b 2024-11-22T15:24:46,077 INFO [StoreOpener-33885ab322a31541d17da102b047512b-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T15:24:46,077 INFO [StoreOpener-33885ab322a31541d17da102b047512b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 33885ab322a31541d17da102b047512b columnFamilyName A 2024-11-22T15:24:46,077 DEBUG [StoreOpener-33885ab322a31541d17da102b047512b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:46,078 INFO [StoreOpener-33885ab322a31541d17da102b047512b-1 {}] regionserver.HStore(327): Store=33885ab322a31541d17da102b047512b/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T15:24:46,078 INFO [StoreOpener-33885ab322a31541d17da102b047512b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 33885ab322a31541d17da102b047512b 2024-11-22T15:24:46,079 INFO [StoreOpener-33885ab322a31541d17da102b047512b-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T15:24:46,079 INFO [StoreOpener-33885ab322a31541d17da102b047512b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 33885ab322a31541d17da102b047512b columnFamilyName B 2024-11-22T15:24:46,079 DEBUG [StoreOpener-33885ab322a31541d17da102b047512b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:46,079 INFO [StoreOpener-33885ab322a31541d17da102b047512b-1 {}] regionserver.HStore(327): Store=33885ab322a31541d17da102b047512b/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T15:24:46,079 INFO [StoreOpener-33885ab322a31541d17da102b047512b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 33885ab322a31541d17da102b047512b 2024-11-22T15:24:46,080 INFO [StoreOpener-33885ab322a31541d17da102b047512b-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T15:24:46,080 INFO [StoreOpener-33885ab322a31541d17da102b047512b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 33885ab322a31541d17da102b047512b columnFamilyName C 2024-11-22T15:24:46,080 DEBUG [StoreOpener-33885ab322a31541d17da102b047512b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:46,080 INFO [StoreOpener-33885ab322a31541d17da102b047512b-1 {}] regionserver.HStore(327): Store=33885ab322a31541d17da102b047512b/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T15:24:46,080 INFO [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:46,081 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b 2024-11-22T15:24:46,081 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b 2024-11-22T15:24:46,082 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T15:24:46,083 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(1085): writing seq id for 33885ab322a31541d17da102b047512b 2024-11-22T15:24:46,085 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T15:24:46,086 INFO [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(1102): Opened 33885ab322a31541d17da102b047512b; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74533868, jitterRate=0.11064118146896362}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T15:24:46,087 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(1001): Region open journal for 33885ab322a31541d17da102b047512b: 2024-11-22T15:24:46,087 INFO [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b., pid=157, masterSystemTime=1732289086073 2024-11-22T15:24:46,088 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:46,089 INFO [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 
2024-11-22T15:24:46,089 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=156 updating hbase:meta row=33885ab322a31541d17da102b047512b, regionState=OPEN, openSeqNum=2, regionLocation=77927f992d0b,36033,1732288915809 2024-11-22T15:24:46,091 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=157, resume processing ppid=156 2024-11-22T15:24:46,091 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, ppid=156, state=SUCCESS; OpenRegionProcedure 33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 in 169 msec 2024-11-22T15:24:46,093 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=156, resume processing ppid=155 2024-11-22T15:24:46,093 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, ppid=155, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=33885ab322a31541d17da102b047512b, ASSIGN in 325 msec 2024-11-22T15:24:46,093 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=155, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T15:24:46,093 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732289086093"}]},"ts":"1732289086093"} 2024-11-22T15:24:46,094 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-22T15:24:46,108 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=155, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T15:24:46,110 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2660 sec 2024-11-22T15:24:46,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-22T15:24:46,949 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 155 completed 2024-11-22T15:24:46,950 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2209c520 to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5765d46a 2024-11-22T15:24:46,987 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d9954b7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:24:46,988 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:24:46,989 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38082, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:24:46,990 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-22T15:24:46,990 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33208, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-22T15:24:46,992 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-22T15:24:46,992 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T15:24:46,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=158, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-22T15:24:46,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742521_1697 (size=999) 2024-11-22T15:24:47,401 DEBUG [PEWorker-2 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-11-22T15:24:47,401 INFO [PEWorker-2 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-11-22T15:24:47,404 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=159, ppid=158, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-22T15:24:47,406 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=33885ab322a31541d17da102b047512b, REOPEN/MOVE}] 2024-11-22T15:24:47,406 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=160, ppid=159, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=33885ab322a31541d17da102b047512b, REOPEN/MOVE 2024-11-22T15:24:47,407 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=160 updating hbase:meta row=33885ab322a31541d17da102b047512b, regionState=CLOSING, regionLocation=77927f992d0b,36033,1732288915809 2024-11-22T15:24:47,408 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-22T15:24:47,408 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=161, ppid=160, state=RUNNABLE; CloseRegionProcedure 33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809}] 2024-11-22T15:24:47,559 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:47,560 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] handler.UnassignRegionHandler(124): Close 33885ab322a31541d17da102b047512b 2024-11-22T15:24:47,560 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-22T15:24:47,560 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1681): Closing 33885ab322a31541d17da102b047512b, disabling compactions & flushes 2024-11-22T15:24:47,560 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:47,560 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:47,560 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. after waiting 0 ms 2024-11-22T15:24:47,560 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 
2024-11-22T15:24:47,565 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-22T15:24:47,565 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:47,565 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1635): Region close journal for 33885ab322a31541d17da102b047512b: 2024-11-22T15:24:47,565 WARN [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegionServer(3786): Not adding moved region record: 33885ab322a31541d17da102b047512b to self. 2024-11-22T15:24:47,567 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] handler.UnassignRegionHandler(170): Closed 33885ab322a31541d17da102b047512b 2024-11-22T15:24:47,567 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=160 updating hbase:meta row=33885ab322a31541d17da102b047512b, regionState=CLOSED 2024-11-22T15:24:47,569 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=161, resume processing ppid=160 2024-11-22T15:24:47,570 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, ppid=160, state=SUCCESS; CloseRegionProcedure 33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 in 160 msec 2024-11-22T15:24:47,570 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=160, ppid=159, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=33885ab322a31541d17da102b047512b, REOPEN/MOVE; state=CLOSED, location=77927f992d0b,36033,1732288915809; forceNewPlan=false, retain=true 2024-11-22T15:24:47,720 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=160 updating hbase:meta row=33885ab322a31541d17da102b047512b, regionState=OPENING, regionLocation=77927f992d0b,36033,1732288915809 2024-11-22T15:24:47,721 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=162, ppid=160, state=RUNNABLE; OpenRegionProcedure 33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809}] 2024-11-22T15:24:47,872 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:47,874 INFO [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 
2024-11-22T15:24:47,874 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(7285): Opening region: {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} 2024-11-22T15:24:47,875 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 33885ab322a31541d17da102b047512b 2024-11-22T15:24:47,875 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T15:24:47,875 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(7327): checking encryption for 33885ab322a31541d17da102b047512b 2024-11-22T15:24:47,875 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(7330): checking classloading for 33885ab322a31541d17da102b047512b 2024-11-22T15:24:47,876 INFO [StoreOpener-33885ab322a31541d17da102b047512b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 33885ab322a31541d17da102b047512b 2024-11-22T15:24:47,876 INFO [StoreOpener-33885ab322a31541d17da102b047512b-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T15:24:47,876 INFO [StoreOpener-33885ab322a31541d17da102b047512b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 33885ab322a31541d17da102b047512b columnFamilyName A 2024-11-22T15:24:47,877 DEBUG [StoreOpener-33885ab322a31541d17da102b047512b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:47,877 INFO [StoreOpener-33885ab322a31541d17da102b047512b-1 {}] regionserver.HStore(327): Store=33885ab322a31541d17da102b047512b/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T15:24:47,878 INFO [StoreOpener-33885ab322a31541d17da102b047512b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 33885ab322a31541d17da102b047512b 2024-11-22T15:24:47,878 INFO [StoreOpener-33885ab322a31541d17da102b047512b-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T15:24:47,878 INFO [StoreOpener-33885ab322a31541d17da102b047512b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 33885ab322a31541d17da102b047512b columnFamilyName B 2024-11-22T15:24:47,878 DEBUG [StoreOpener-33885ab322a31541d17da102b047512b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:47,878 INFO [StoreOpener-33885ab322a31541d17da102b047512b-1 {}] regionserver.HStore(327): Store=33885ab322a31541d17da102b047512b/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T15:24:47,879 INFO [StoreOpener-33885ab322a31541d17da102b047512b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 33885ab322a31541d17da102b047512b 2024-11-22T15:24:47,879 INFO [StoreOpener-33885ab322a31541d17da102b047512b-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-22T15:24:47,879 INFO [StoreOpener-33885ab322a31541d17da102b047512b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 33885ab322a31541d17da102b047512b columnFamilyName C 2024-11-22T15:24:47,879 DEBUG [StoreOpener-33885ab322a31541d17da102b047512b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:47,879 INFO [StoreOpener-33885ab322a31541d17da102b047512b-1 {}] regionserver.HStore(327): Store=33885ab322a31541d17da102b047512b/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T15:24:47,880 INFO [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:47,880 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b 2024-11-22T15:24:47,881 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b 2024-11-22T15:24:47,882 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T15:24:47,884 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(1085): writing seq id for 33885ab322a31541d17da102b047512b 2024-11-22T15:24:47,884 INFO [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(1102): Opened 33885ab322a31541d17da102b047512b; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66008382, jitterRate=-0.016398459672927856}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T15:24:47,885 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(1001): Region open journal for 33885ab322a31541d17da102b047512b: 2024-11-22T15:24:47,886 INFO [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b., pid=162, masterSystemTime=1732289087872 2024-11-22T15:24:47,887 DEBUG [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:47,887 INFO [RS_OPEN_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 
2024-11-22T15:24:47,888 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=160 updating hbase:meta row=33885ab322a31541d17da102b047512b, regionState=OPEN, openSeqNum=5, regionLocation=77927f992d0b,36033,1732288915809 2024-11-22T15:24:47,889 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=162, resume processing ppid=160 2024-11-22T15:24:47,889 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, ppid=160, state=SUCCESS; OpenRegionProcedure 33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 in 167 msec 2024-11-22T15:24:47,890 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=159 2024-11-22T15:24:47,890 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=159, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=33885ab322a31541d17da102b047512b, REOPEN/MOVE in 484 msec 2024-11-22T15:24:47,891 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=159, resume processing ppid=158 2024-11-22T15:24:47,891 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, ppid=158, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 487 msec 2024-11-22T15:24:47,892 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 900 msec 2024-11-22T15:24:47,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-11-22T15:24:47,894 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x537a66f8 to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2ac53e79 2024-11-22T15:24:47,933 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d5efb7a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:24:47,934 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x06094c70 to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5bc9c3e 2024-11-22T15:24:47,941 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7fc332d8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:24:47,942 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x103dfc6e to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7181df3b 2024-11-22T15:24:47,950 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17327621, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:24:47,951 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6e047c09 
to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@11030ef5 2024-11-22T15:24:47,958 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1584f18a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:24:47,959 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x60d631a3 to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@69abefea 2024-11-22T15:24:47,974 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b914bf4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:24:47,975 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6e757135 to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3f6a59e4 2024-11-22T15:24:47,983 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d836f78, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:24:47,984 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7846cb78 to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@150e08ed 2024-11-22T15:24:47,991 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@53305d9b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:24:47,992 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5f1754bc to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3a3b66d3 2024-11-22T15:24:48,008 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6bb6288a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:24:48,009 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3d9113f3 to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5cfdf76c 2024-11-22T15:24:48,016 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6556601, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:24:48,017 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x5bb75907 to 127.0.0.1:52970 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@68c2838a 2024-11-22T15:24:48,025 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@458a85fd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T15:24:48,031 DEBUG [hconnection-0x12ce9caf-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:24:48,032 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40664, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:24:48,035 DEBUG [hconnection-0x13761892-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:24:48,036 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40666, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:24:48,038 DEBUG [hconnection-0x74e0c639-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:24:48,040 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40670, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:24:48,041 DEBUG [hconnection-0x1220dca1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:24:48,043 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40674, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:24:48,046 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:24:48,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees 2024-11-22T15:24:48,048 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:24:48,049 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:24:48,049 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:24:48,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-22T15:24:48,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): 
Flush requested on 33885ab322a31541d17da102b047512b 2024-11-22T15:24:48,054 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 33885ab322a31541d17da102b047512b 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-22T15:24:48,054 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=A 2024-11-22T15:24:48,054 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:48,054 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=B 2024-11-22T15:24:48,054 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:48,054 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=C 2024-11-22T15:24:48,054 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:48,055 DEBUG [hconnection-0x7631f273-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:24:48,056 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40686, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:24:48,068 DEBUG [hconnection-0x34edfa4f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:24:48,071 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40688, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:24:48,078 DEBUG [hconnection-0x688f8dc8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:24:48,079 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40694, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:24:48,084 DEBUG [hconnection-0x6335296b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:24:48,086 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40710, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:24:48,089 DEBUG [hconnection-0x4485ea65-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:24:48,090 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40716, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:24:48,096 DEBUG [hconnection-0x2a1a4274-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T15:24:48,100 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40724, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T15:24:48,104 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore 
size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:48,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289148100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:48,104 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:48,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40694 deadline: 1732289148100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:48,105 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:48,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289148100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:48,108 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:48,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289148104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:48,117 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:48,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289148115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:48,121 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122540c1730b3364538928ba85cfc49d1d6_33885ab322a31541d17da102b047512b is 50, key is test_row_0/A:col10/1732289088053/Put/seqid=0 2024-11-22T15:24:48,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-22T15:24:48,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742522_1698 (size=12154) 2024-11-22T15:24:48,167 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:48,171 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122540c1730b3364538928ba85cfc49d1d6_33885ab322a31541d17da102b047512b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122540c1730b3364538928ba85cfc49d1d6_33885ab322a31541d17da102b047512b 2024-11-22T15:24:48,173 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/1d827f1db37040a4917d32e00228321f, store: [table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:24:48,173 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/1d827f1db37040a4917d32e00228321f is 175, key is test_row_0/A:col10/1732289088053/Put/seqid=0 2024-11-22T15:24:48,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742523_1699 (size=30955) 2024-11-22T15:24:48,178 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=15, memsize=17.9 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/1d827f1db37040a4917d32e00228321f 2024-11-22T15:24:48,201 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:48,201 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-22T15:24:48,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:48,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. as already flushing 2024-11-22T15:24:48,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:48,202 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:48,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:48,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:48,204 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/2f6e09938d444f33acf604efa5119b78 is 50, key is test_row_0/B:col10/1732289088053/Put/seqid=0 2024-11-22T15:24:48,206 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:48,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289148205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:48,208 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:48,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289148206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:48,208 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:48,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40694 deadline: 1732289148206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:48,209 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:48,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289148209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:48,221 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:48,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289148220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:48,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742524_1700 (size=12001) 2024-11-22T15:24:48,240 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/2f6e09938d444f33acf604efa5119b78 2024-11-22T15:24:48,259 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/e89cd632b1fa481ba8a29f6bdfd32deb is 50, key is test_row_0/C:col10/1732289088053/Put/seqid=0 2024-11-22T15:24:48,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742525_1701 (size=12001) 2024-11-22T15:24:48,277 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/e89cd632b1fa481ba8a29f6bdfd32deb 2024-11-22T15:24:48,282 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/1d827f1db37040a4917d32e00228321f as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/1d827f1db37040a4917d32e00228321f 2024-11-22T15:24:48,285 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/1d827f1db37040a4917d32e00228321f, entries=150, sequenceid=15, filesize=30.2 K 2024-11-22T15:24:48,286 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/2f6e09938d444f33acf604efa5119b78 as 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/2f6e09938d444f33acf604efa5119b78 2024-11-22T15:24:48,295 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/2f6e09938d444f33acf604efa5119b78, entries=150, sequenceid=15, filesize=11.7 K 2024-11-22T15:24:48,296 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/e89cd632b1fa481ba8a29f6bdfd32deb as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/e89cd632b1fa481ba8a29f6bdfd32deb 2024-11-22T15:24:48,306 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/e89cd632b1fa481ba8a29f6bdfd32deb, entries=150, sequenceid=15, filesize=11.7 K 2024-11-22T15:24:48,307 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 33885ab322a31541d17da102b047512b in 253ms, sequenceid=15, compaction requested=false 2024-11-22T15:24:48,307 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 33885ab322a31541d17da102b047512b: 2024-11-22T15:24:48,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-22T15:24:48,353 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:48,354 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-22T15:24:48,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 
2024-11-22T15:24:48,355 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2837): Flushing 33885ab322a31541d17da102b047512b 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-22T15:24:48,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=A 2024-11-22T15:24:48,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:48,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=B 2024-11-22T15:24:48,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:48,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=C 2024-11-22T15:24:48,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:48,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411225d15c63d17324fb3b85cb1225c2cdcb1_33885ab322a31541d17da102b047512b is 50, key is test_row_0/A:col10/1732289088098/Put/seqid=0 2024-11-22T15:24:48,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742526_1702 (size=12154) 2024-11-22T15:24:48,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on 33885ab322a31541d17da102b047512b 2024-11-22T15:24:48,441 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. as already flushing 2024-11-22T15:24:48,449 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:48,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40694 deadline: 1732289148445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:48,449 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:48,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289148445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:48,449 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:48,449 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:48,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289148446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:48,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289148446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:48,449 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:48,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289148447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:48,550 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:48,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289148550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:48,551 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:48,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289148550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:48,551 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:48,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289148550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:48,551 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:48,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40694 deadline: 1732289148550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:48,552 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:48,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289148550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:48,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-22T15:24:48,752 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:48,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289148751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:48,753 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:48,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289148752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:48,753 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:48,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289148752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:48,754 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:48,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289148752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:48,755 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:48,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40694 deadline: 1732289148753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:48,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:48,780 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411225d15c63d17324fb3b85cb1225c2cdcb1_33885ab322a31541d17da102b047512b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411225d15c63d17324fb3b85cb1225c2cdcb1_33885ab322a31541d17da102b047512b 2024-11-22T15:24:48,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/8226ecd475c74c4ea15806cb4f885ea0, store: [table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:24:48,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/8226ecd475c74c4ea15806cb4f885ea0 is 175, key is test_row_0/A:col10/1732289088098/Put/seqid=0 2024-11-22T15:24:48,785 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742527_1703 (size=30955) 2024-11-22T15:24:48,786 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=40, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/8226ecd475c74c4ea15806cb4f885ea0 2024-11-22T15:24:48,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/af302f7bcd0a40fb9a93bc9a875f8999 is 50, key is test_row_0/B:col10/1732289088098/Put/seqid=0 2024-11-22T15:24:48,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742528_1704 (size=12001) 2024-11-22T15:24:49,054 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:49,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289149053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:49,056 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:49,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289149054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:49,057 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:49,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289149056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:49,057 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:49,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40694 deadline: 1732289149056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:49,057 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:49,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289149056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:49,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-22T15:24:49,202 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/af302f7bcd0a40fb9a93bc9a875f8999 2024-11-22T15:24:49,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/7fd1176917814c57afe4d2e7531c6933 is 50, key is test_row_0/C:col10/1732289088098/Put/seqid=0 2024-11-22T15:24:49,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742529_1705 (size=12001) 2024-11-22T15:24:49,559 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:49,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289149557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:49,559 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:49,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289149558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:49,560 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:49,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289149559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:49,561 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:49,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289149559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:49,563 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:49,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40694 deadline: 1732289149562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:49,615 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/7fd1176917814c57afe4d2e7531c6933 2024-11-22T15:24:49,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/8226ecd475c74c4ea15806cb4f885ea0 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/8226ecd475c74c4ea15806cb4f885ea0 2024-11-22T15:24:49,621 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/8226ecd475c74c4ea15806cb4f885ea0, entries=150, sequenceid=40, filesize=30.2 K 2024-11-22T15:24:49,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/af302f7bcd0a40fb9a93bc9a875f8999 as 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/af302f7bcd0a40fb9a93bc9a875f8999 2024-11-22T15:24:49,624 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/af302f7bcd0a40fb9a93bc9a875f8999, entries=150, sequenceid=40, filesize=11.7 K 2024-11-22T15:24:49,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/7fd1176917814c57afe4d2e7531c6933 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/7fd1176917814c57afe4d2e7531c6933 2024-11-22T15:24:49,628 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/7fd1176917814c57afe4d2e7531c6933, entries=150, sequenceid=40, filesize=11.7 K 2024-11-22T15:24:49,629 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 33885ab322a31541d17da102b047512b in 1275ms, sequenceid=40, compaction requested=false 2024-11-22T15:24:49,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2538): Flush status journal for 33885ab322a31541d17da102b047512b: 2024-11-22T15:24:49,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 
2024-11-22T15:24:49,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=164 2024-11-22T15:24:49,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=164 2024-11-22T15:24:49,631 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=163 2024-11-22T15:24:49,631 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5810 sec 2024-11-22T15:24:49,632 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees in 1.5860 sec 2024-11-22T15:24:49,851 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-22T15:24:50,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-22T15:24:50,153 INFO [Thread-3041 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 163 completed 2024-11-22T15:24:50,155 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:24:50,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees 2024-11-22T15:24:50,156 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:24:50,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-22T15:24:50,157 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:24:50,157 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:24:50,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-22T15:24:50,308 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:50,309 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-22T15:24:50,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:50,309 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2837): Flushing 33885ab322a31541d17da102b047512b 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-22T15:24:50,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=A 2024-11-22T15:24:50,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:50,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=B 2024-11-22T15:24:50,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:50,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=C 2024-11-22T15:24:50,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:50,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122326ed48ce2484d5baa0bb1038bafa1e8_33885ab322a31541d17da102b047512b is 50, key is test_row_0/A:col10/1732289088446/Put/seqid=0 2024-11-22T15:24:50,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742530_1706 (size=12154) 2024-11-22T15:24:50,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:50,321 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122326ed48ce2484d5baa0bb1038bafa1e8_33885ab322a31541d17da102b047512b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122326ed48ce2484d5baa0bb1038bafa1e8_33885ab322a31541d17da102b047512b 2024-11-22T15:24:50,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/b8b58282becd438e9f6305186e142f3b, store: [table=TestAcidGuarantees family=A 
region=33885ab322a31541d17da102b047512b] 2024-11-22T15:24:50,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/b8b58282becd438e9f6305186e142f3b is 175, key is test_row_0/A:col10/1732289088446/Put/seqid=0 2024-11-22T15:24:50,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742531_1707 (size=30955) 2024-11-22T15:24:50,329 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=51, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/b8b58282becd438e9f6305186e142f3b 2024-11-22T15:24:50,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/f337afb61553453995195f568c849e19 is 50, key is test_row_0/B:col10/1732289088446/Put/seqid=0 2024-11-22T15:24:50,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742532_1708 (size=12001) 2024-11-22T15:24:50,339 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/f337afb61553453995195f568c849e19 2024-11-22T15:24:50,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/4b408c027ba94ff1b796816392305c19 is 50, key is test_row_0/C:col10/1732289088446/Put/seqid=0 2024-11-22T15:24:50,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742533_1709 (size=12001) 2024-11-22T15:24:50,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-22T15:24:50,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on 33885ab322a31541d17da102b047512b 2024-11-22T15:24:50,566 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. as already flushing 2024-11-22T15:24:50,629 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:50,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289150624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:50,629 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:50,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289150625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:50,630 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:50,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289150625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:50,630 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:50,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40694 deadline: 1732289150625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:50,630 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:50,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289150626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:50,732 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:50,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289150730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:50,733 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:50,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289150731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:50,733 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:50,733 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:50,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289150731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:50,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40694 deadline: 1732289150731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:50,733 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:50,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289150731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:50,755 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/4b408c027ba94ff1b796816392305c19 2024-11-22T15:24:50,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-22T15:24:50,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/b8b58282becd438e9f6305186e142f3b as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/b8b58282becd438e9f6305186e142f3b 2024-11-22T15:24:50,763 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/b8b58282becd438e9f6305186e142f3b, entries=150, sequenceid=51, filesize=30.2 K 2024-11-22T15:24:50,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/f337afb61553453995195f568c849e19 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/f337afb61553453995195f568c849e19 2024-11-22T15:24:50,768 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/f337afb61553453995195f568c849e19, entries=150, sequenceid=51, filesize=11.7 K 2024-11-22T15:24:50,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, 
pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/4b408c027ba94ff1b796816392305c19 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/4b408c027ba94ff1b796816392305c19 2024-11-22T15:24:50,781 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/4b408c027ba94ff1b796816392305c19, entries=150, sequenceid=51, filesize=11.7 K 2024-11-22T15:24:50,781 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 33885ab322a31541d17da102b047512b in 472ms, sequenceid=51, compaction requested=true 2024-11-22T15:24:50,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2538): Flush status journal for 33885ab322a31541d17da102b047512b: 2024-11-22T15:24:50,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:50,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=166 2024-11-22T15:24:50,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=166 2024-11-22T15:24:50,785 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=165 2024-11-22T15:24:50,785 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 626 msec 2024-11-22T15:24:50,786 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees in 630 msec 2024-11-22T15:24:50,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on 33885ab322a31541d17da102b047512b 2024-11-22T15:24:50,937 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 33885ab322a31541d17da102b047512b 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-22T15:24:50,937 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=A 2024-11-22T15:24:50,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:50,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=B 2024-11-22T15:24:50,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:50,938 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=C 2024-11-22T15:24:50,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:50,944 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:50,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289150940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:50,944 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112204c2e2f9b75a4f50a3731899edb48432_33885ab322a31541d17da102b047512b is 50, key is test_row_0/A:col10/1732289090624/Put/seqid=0 2024-11-22T15:24:50,947 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:50,947 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:50,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289150942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:50,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289150943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:50,947 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:50,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289150944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:50,947 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:50,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40694 deadline: 1732289150944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:50,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742534_1710 (size=14594) 2024-11-22T15:24:50,950 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:50,953 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112204c2e2f9b75a4f50a3731899edb48432_33885ab322a31541d17da102b047512b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112204c2e2f9b75a4f50a3731899edb48432_33885ab322a31541d17da102b047512b 2024-11-22T15:24:50,953 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/7059e3be1f96435ab0826040af2a4822, store: [table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:24:50,954 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/7059e3be1f96435ab0826040af2a4822 is 175, key is test_row_0/A:col10/1732289090624/Put/seqid=0 2024-11-22T15:24:50,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742535_1711 (size=39549) 2024-11-22T15:24:50,968 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=78, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/7059e3be1f96435ab0826040af2a4822 2024-11-22T15:24:50,975 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/0ed0f807e6714156afd2c03bbf8830e2 is 50, key is 
test_row_0/B:col10/1732289090624/Put/seqid=0 2024-11-22T15:24:50,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742536_1712 (size=12001) 2024-11-22T15:24:51,047 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:51,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289151045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:51,051 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:51,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289151048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:51,051 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:51,051 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:51,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40694 deadline: 1732289151048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:51,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289151048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:51,051 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:51,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289151048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:51,252 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:51,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289151249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:51,254 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:51,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289151252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:51,256 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:51,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289151253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:51,256 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:51,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40694 deadline: 1732289151253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:51,257 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:51,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289151254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:51,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-22T15:24:51,259 INFO [Thread-3041 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 165 completed 2024-11-22T15:24:51,260 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:24:51,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees 2024-11-22T15:24:51,268 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:24:51,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-22T15:24:51,268 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:24:51,268 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:24:51,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-22T15:24:51,397 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/0ed0f807e6714156afd2c03bbf8830e2 2024-11-22T15:24:51,416 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/56bdf4db8cf24d15b1ab634497e4295d is 50, key is test_row_0/C:col10/1732289090624/Put/seqid=0 
2024-11-22T15:24:51,420 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:51,421 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-22T15:24:51,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:51,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. as already flushing 2024-11-22T15:24:51,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:51,421 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:51,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:24:51,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:51,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742537_1713 (size=12001) 2024-11-22T15:24:51,556 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:51,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289151554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:51,557 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:51,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289151555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:51,560 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:51,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289151557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:51,560 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:51,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40694 deadline: 1732289151557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:51,561 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:51,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289151558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:51,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-22T15:24:51,573 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:51,573 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-22T15:24:51,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:51,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. as already flushing 2024-11-22T15:24:51,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:51,573 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:51,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:51,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:51,725 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:51,725 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-22T15:24:51,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:51,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. as already flushing 2024-11-22T15:24:51,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:51,726 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:51,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:51,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:51,836 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/56bdf4db8cf24d15b1ab634497e4295d 2024-11-22T15:24:51,839 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/7059e3be1f96435ab0826040af2a4822 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/7059e3be1f96435ab0826040af2a4822 2024-11-22T15:24:51,842 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/7059e3be1f96435ab0826040af2a4822, entries=200, sequenceid=78, filesize=38.6 K 2024-11-22T15:24:51,842 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/0ed0f807e6714156afd2c03bbf8830e2 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/0ed0f807e6714156afd2c03bbf8830e2 2024-11-22T15:24:51,844 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/0ed0f807e6714156afd2c03bbf8830e2, entries=150, sequenceid=78, 
filesize=11.7 K 2024-11-22T15:24:51,845 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/56bdf4db8cf24d15b1ab634497e4295d as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/56bdf4db8cf24d15b1ab634497e4295d 2024-11-22T15:24:51,847 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/56bdf4db8cf24d15b1ab634497e4295d, entries=150, sequenceid=78, filesize=11.7 K 2024-11-22T15:24:51,848 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=46.96 KB/48090 for 33885ab322a31541d17da102b047512b in 911ms, sequenceid=78, compaction requested=true 2024-11-22T15:24:51,848 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 33885ab322a31541d17da102b047512b: 2024-11-22T15:24:51,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33885ab322a31541d17da102b047512b:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:24:51,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:51,848 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T15:24:51,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33885ab322a31541d17da102b047512b:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:24:51,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:51,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33885ab322a31541d17da102b047512b:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:24:51,848 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T15:24:51,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:24:51,849 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 132414 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T15:24:51,849 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T15:24:51,849 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): 
33885ab322a31541d17da102b047512b/B is initiating minor compaction (all files) 2024-11-22T15:24:51,849 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): 33885ab322a31541d17da102b047512b/A is initiating minor compaction (all files) 2024-11-22T15:24:51,849 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33885ab322a31541d17da102b047512b/B in TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:51,849 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33885ab322a31541d17da102b047512b/A in TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:51,849 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/1d827f1db37040a4917d32e00228321f, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/8226ecd475c74c4ea15806cb4f885ea0, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/b8b58282becd438e9f6305186e142f3b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/7059e3be1f96435ab0826040af2a4822] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp, totalSize=129.3 K 2024-11-22T15:24:51,849 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/2f6e09938d444f33acf604efa5119b78, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/af302f7bcd0a40fb9a93bc9a875f8999, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/f337afb61553453995195f568c849e19, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/0ed0f807e6714156afd2c03bbf8830e2] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp, totalSize=46.9 K 2024-11-22T15:24:51,849 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:51,849 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 
files: [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/1d827f1db37040a4917d32e00228321f, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/8226ecd475c74c4ea15806cb4f885ea0, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/b8b58282becd438e9f6305186e142f3b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/7059e3be1f96435ab0826040af2a4822] 2024-11-22T15:24:51,849 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2f6e09938d444f33acf604efa5119b78, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732289088044 2024-11-22T15:24:51,849 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 1d827f1db37040a4917d32e00228321f, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732289088044 2024-11-22T15:24:51,849 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting af302f7bcd0a40fb9a93bc9a875f8999, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732289088098 2024-11-22T15:24:51,849 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 8226ecd475c74c4ea15806cb4f885ea0, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732289088098 2024-11-22T15:24:51,849 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting f337afb61553453995195f568c849e19, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732289088444 2024-11-22T15:24:51,849 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting b8b58282becd438e9f6305186e142f3b, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732289088444 2024-11-22T15:24:51,850 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0ed0f807e6714156afd2c03bbf8830e2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732289090624 2024-11-22T15:24:51,850 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 7059e3be1f96435ab0826040af2a4822, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732289090624 2024-11-22T15:24:51,854 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:24:51,855 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33885ab322a31541d17da102b047512b#B#compaction#615 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:51,855 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/c8096ced47c241e69e0d8a67c3b2a0c1 is 50, key is test_row_0/B:col10/1732289090624/Put/seqid=0 2024-11-22T15:24:51,856 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411220ed43ee8f40f468f8fc7684a7d77b163_33885ab322a31541d17da102b047512b store=[table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:24:51,859 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411220ed43ee8f40f468f8fc7684a7d77b163_33885ab322a31541d17da102b047512b, store=[table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:24:51,859 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411220ed43ee8f40f468f8fc7684a7d77b163_33885ab322a31541d17da102b047512b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:24:51,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742538_1714 (size=12139) 2024-11-22T15:24:51,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742539_1715 (size=4469) 2024-11-22T15:24:51,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-22T15:24:51,877 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:51,878 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-22T15:24:51,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 
2024-11-22T15:24:51,878 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2837): Flushing 33885ab322a31541d17da102b047512b 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-22T15:24:51,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=A 2024-11-22T15:24:51,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:51,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=B 2024-11-22T15:24:51,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:51,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=C 2024-11-22T15:24:51,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:51,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122287f73bb84d149dc89a9600f20c50baf_33885ab322a31541d17da102b047512b is 50, key is test_row_0/A:col10/1732289090941/Put/seqid=0 2024-11-22T15:24:51,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742540_1716 (size=12154) 2024-11-22T15:24:51,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:51,889 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122287f73bb84d149dc89a9600f20c50baf_33885ab322a31541d17da102b047512b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122287f73bb84d149dc89a9600f20c50baf_33885ab322a31541d17da102b047512b 2024-11-22T15:24:51,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/79976962067f42dfa7e29fe798bf87c9, store: [table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:24:51,890 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/79976962067f42dfa7e29fe798bf87c9 is 175, key is test_row_0/A:col10/1732289090941/Put/seqid=0 2024-11-22T15:24:51,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742541_1717 (size=30955) 2024-11-22T15:24:52,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on 33885ab322a31541d17da102b047512b 2024-11-22T15:24:52,060 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. as already flushing 2024-11-22T15:24:52,096 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:52,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289152091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:52,096 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:52,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289152092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:52,097 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:52,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289152092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:52,097 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:52,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289152093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:52,100 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:52,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40694 deadline: 1732289152096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:52,198 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:52,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289152197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:52,200 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:52,200 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:52,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289152197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:52,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289152197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:52,200 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:52,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289152198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:52,207 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:52,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40694 deadline: 1732289152207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:52,262 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33885ab322a31541d17da102b047512b#A#compaction#616 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:52,263 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/9e08f9d8c7864999b84ee58a6236bfab is 175, key is test_row_0/A:col10/1732289090624/Put/seqid=0 2024-11-22T15:24:52,267 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/c8096ced47c241e69e0d8a67c3b2a0c1 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/c8096ced47c241e69e0d8a67c3b2a0c1 2024-11-22T15:24:52,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742542_1718 (size=31093) 2024-11-22T15:24:52,271 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 33885ab322a31541d17da102b047512b/B of 33885ab322a31541d17da102b047512b into c8096ced47c241e69e0d8a67c3b2a0c1(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:24:52,271 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33885ab322a31541d17da102b047512b: 2024-11-22T15:24:52,271 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b., storeName=33885ab322a31541d17da102b047512b/B, priority=12, startTime=1732289091848; duration=0sec 2024-11-22T15:24:52,271 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:24:52,271 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33885ab322a31541d17da102b047512b:B 2024-11-22T15:24:52,271 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T15:24:52,272 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T15:24:52,272 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): 33885ab322a31541d17da102b047512b/C is initiating minor compaction (all files) 2024-11-22T15:24:52,272 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33885ab322a31541d17da102b047512b/C in TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 
2024-11-22T15:24:52,272 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/e89cd632b1fa481ba8a29f6bdfd32deb, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/7fd1176917814c57afe4d2e7531c6933, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/4b408c027ba94ff1b796816392305c19, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/56bdf4db8cf24d15b1ab634497e4295d] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp, totalSize=46.9 K 2024-11-22T15:24:52,273 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting e89cd632b1fa481ba8a29f6bdfd32deb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732289088044 2024-11-22T15:24:52,273 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/9e08f9d8c7864999b84ee58a6236bfab as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/9e08f9d8c7864999b84ee58a6236bfab 2024-11-22T15:24:52,273 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7fd1176917814c57afe4d2e7531c6933, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732289088098 2024-11-22T15:24:52,273 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4b408c027ba94ff1b796816392305c19, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732289088444 2024-11-22T15:24:52,273 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 56bdf4db8cf24d15b1ab634497e4295d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732289090624 2024-11-22T15:24:52,280 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 33885ab322a31541d17da102b047512b/A of 33885ab322a31541d17da102b047512b into 9e08f9d8c7864999b84ee58a6236bfab(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:24:52,280 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33885ab322a31541d17da102b047512b: 2024-11-22T15:24:52,280 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b., storeName=33885ab322a31541d17da102b047512b/A, priority=12, startTime=1732289091848; duration=0sec 2024-11-22T15:24:52,280 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:52,280 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33885ab322a31541d17da102b047512b:A 2024-11-22T15:24:52,284 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33885ab322a31541d17da102b047512b#C#compaction#618 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:52,284 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/fdde57c11a5b44a0906807406844b9bf is 50, key is test_row_0/C:col10/1732289090624/Put/seqid=0 2024-11-22T15:24:52,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742543_1719 (size=12139) 2024-11-22T15:24:52,293 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=88, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/79976962067f42dfa7e29fe798bf87c9 2024-11-22T15:24:52,296 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/fdde57c11a5b44a0906807406844b9bf as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/fdde57c11a5b44a0906807406844b9bf 2024-11-22T15:24:52,300 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 33885ab322a31541d17da102b047512b/C of 33885ab322a31541d17da102b047512b into fdde57c11a5b44a0906807406844b9bf(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:24:52,300 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33885ab322a31541d17da102b047512b: 2024-11-22T15:24:52,300 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b., storeName=33885ab322a31541d17da102b047512b/C, priority=12, startTime=1732289091848; duration=0sec 2024-11-22T15:24:52,300 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:52,300 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33885ab322a31541d17da102b047512b:C 2024-11-22T15:24:52,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/9b02fd509ec448e3a8d7df374979827e is 50, key is test_row_0/B:col10/1732289090941/Put/seqid=0 2024-11-22T15:24:52,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742544_1720 (size=12001) 2024-11-22T15:24:52,306 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=88 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/9b02fd509ec448e3a8d7df374979827e 2024-11-22T15:24:52,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/3bf50a1ccc2644d78ad67e1eca1d4e81 is 50, key is test_row_0/C:col10/1732289090941/Put/seqid=0 2024-11-22T15:24:52,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742545_1721 (size=12001) 2024-11-22T15:24:52,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-22T15:24:52,401 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:52,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289152400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:52,402 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:52,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289152401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:52,403 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:52,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289152402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:52,403 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:52,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289152402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:52,409 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:52,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40694 deadline: 1732289152408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:52,705 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:52,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289152703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:52,706 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:52,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289152705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:52,706 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:52,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289152705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:52,708 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:52,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289152706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:52,713 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:52,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40694 deadline: 1732289152711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:52,719 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=88 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/3bf50a1ccc2644d78ad67e1eca1d4e81 2024-11-22T15:24:52,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/79976962067f42dfa7e29fe798bf87c9 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/79976962067f42dfa7e29fe798bf87c9 2024-11-22T15:24:52,725 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/79976962067f42dfa7e29fe798bf87c9, entries=150, sequenceid=88, filesize=30.2 K 2024-11-22T15:24:52,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/9b02fd509ec448e3a8d7df374979827e as 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/9b02fd509ec448e3a8d7df374979827e 2024-11-22T15:24:52,728 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/9b02fd509ec448e3a8d7df374979827e, entries=150, sequenceid=88, filesize=11.7 K 2024-11-22T15:24:52,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/3bf50a1ccc2644d78ad67e1eca1d4e81 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/3bf50a1ccc2644d78ad67e1eca1d4e81 2024-11-22T15:24:52,731 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/3bf50a1ccc2644d78ad67e1eca1d4e81, entries=150, sequenceid=88, filesize=11.7 K 2024-11-22T15:24:52,731 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 33885ab322a31541d17da102b047512b in 853ms, sequenceid=88, compaction requested=false 2024-11-22T15:24:52,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2538): Flush status journal for 33885ab322a31541d17da102b047512b: 2024-11-22T15:24:52,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:52,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=168 2024-11-22T15:24:52,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=168 2024-11-22T15:24:52,733 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167 2024-11-22T15:24:52,733 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4640 sec 2024-11-22T15:24:52,734 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees in 1.4730 sec 2024-11-22T15:24:52,993 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-22T15:24:53,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on 33885ab322a31541d17da102b047512b 2024-11-22T15:24:53,210 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 33885ab322a31541d17da102b047512b 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-22T15:24:53,211 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=A 2024-11-22T15:24:53,211 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:53,211 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=B 2024-11-22T15:24:53,211 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:53,211 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=C 2024-11-22T15:24:53,211 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:53,216 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122262d091cd8d6422b91076f461498eccb_33885ab322a31541d17da102b047512b is 50, key is test_row_0/A:col10/1732289093209/Put/seqid=0 2024-11-22T15:24:53,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742546_1722 (size=17034) 2024-11-22T15:24:53,219 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:53,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40694 deadline: 1732289153216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:53,219 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:53,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289153217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:53,220 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:53,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289153217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:53,222 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:53,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289153219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:53,222 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:53,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289153219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:53,321 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:53,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289153320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:53,321 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:53,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289153320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:53,323 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:53,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289153323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:53,323 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:53,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289153323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:53,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-22T15:24:53,372 INFO [Thread-3041 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 167 completed 2024-11-22T15:24:53,373 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:24:53,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=169, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees 2024-11-22T15:24:53,374 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=169, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:24:53,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-22T15:24:53,374 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=169, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:24:53,374 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:24:53,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-22T15:24:53,524 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:53,524 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:53,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289153522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:53,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289153522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:53,525 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:53,525 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-22T15:24:53,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:53,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. as already flushing 2024-11-22T15:24:53,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:53,526 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:53,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:53,526 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:53,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] 
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:53,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289153524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:53,526 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:53,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289153524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:53,619 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:53,622 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122262d091cd8d6422b91076f461498eccb_33885ab322a31541d17da102b047512b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122262d091cd8d6422b91076f461498eccb_33885ab322a31541d17da102b047512b 2024-11-22T15:24:53,622 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/f183745a497446bbb7ca8d48cae03878, store: [table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:24:53,622 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/f183745a497446bbb7ca8d48cae03878 is 175, key is test_row_0/A:col10/1732289093209/Put/seqid=0 2024-11-22T15:24:53,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742547_1723 (size=48139) 2024-11-22T15:24:53,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-22T15:24:53,677 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:53,677 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-22T15:24:53,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 
2024-11-22T15:24:53,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. as already flushing 2024-11-22T15:24:53,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:53,678 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:53,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:24:53,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:53,831 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:53,831 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-22T15:24:53,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:53,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 
as already flushing 2024-11-22T15:24:53,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:53,832 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:53,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:53,832 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:53,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289153825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:53,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:53,833 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:53,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289153831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:53,833 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:53,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289153831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:53,835 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:53,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289153833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:53,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-22T15:24:53,984 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:53,984 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-22T15:24:53,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:53,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. as already flushing 2024-11-22T15:24:53,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:53,984 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:24:53,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:53,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:54,027 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=119, memsize=55.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/f183745a497446bbb7ca8d48cae03878 2024-11-22T15:24:54,032 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/14a989f59e144d38b0c7f1c0c31c64cb is 50, key is test_row_0/B:col10/1732289093209/Put/seqid=0 2024-11-22T15:24:54,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742548_1724 (size=12001) 2024-11-22T15:24:54,136 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:54,136 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-22T15:24:54,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:54,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. as already flushing 2024-11-22T15:24:54,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:54,137 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:24:54,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:54,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:54,231 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:54,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40694 deadline: 1732289154229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:54,288 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:54,288 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-22T15:24:54,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:54,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. as already flushing 2024-11-22T15:24:54,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 
2024-11-22T15:24:54,288 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:54,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:54,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:54,336 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:54,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289154333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:54,336 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:54,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289154334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:54,336 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:54,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289154335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:54,340 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:54,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289154339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:54,436 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/14a989f59e144d38b0c7f1c0c31c64cb 2024-11-22T15:24:54,440 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:54,440 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-22T15:24:54,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:54,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. as already flushing 2024-11-22T15:24:54,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:54,440 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:54,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:54,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:54,441 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/1538c6a6db0347a1aae5d2d2f1e32000 is 50, key is test_row_0/C:col10/1732289093209/Put/seqid=0 2024-11-22T15:24:54,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742549_1725 (size=12001) 2024-11-22T15:24:54,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-22T15:24:54,592 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:54,592 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-22T15:24:54,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:54,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. as already flushing 2024-11-22T15:24:54,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:54,592 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:54,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:54,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:54,744 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:54,744 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-22T15:24:54,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:54,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. as already flushing 2024-11-22T15:24:54,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:54,744 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:54,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:54,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:24:54,846 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/1538c6a6db0347a1aae5d2d2f1e32000 2024-11-22T15:24:54,849 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/f183745a497446bbb7ca8d48cae03878 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/f183745a497446bbb7ca8d48cae03878 2024-11-22T15:24:54,851 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/f183745a497446bbb7ca8d48cae03878, entries=250, sequenceid=119, filesize=47.0 K 2024-11-22T15:24:54,852 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/14a989f59e144d38b0c7f1c0c31c64cb as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/14a989f59e144d38b0c7f1c0c31c64cb 2024-11-22T15:24:54,854 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/14a989f59e144d38b0c7f1c0c31c64cb, entries=150, sequenceid=119, filesize=11.7 K 2024-11-22T15:24:54,855 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/1538c6a6db0347a1aae5d2d2f1e32000 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/1538c6a6db0347a1aae5d2d2f1e32000 2024-11-22T15:24:54,857 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/1538c6a6db0347a1aae5d2d2f1e32000, entries=150, sequenceid=119, filesize=11.7 K 2024-11-22T15:24:54,858 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=40.25 KB/41220 for 33885ab322a31541d17da102b047512b in 1648ms, sequenceid=119, compaction requested=true 2024-11-22T15:24:54,858 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 33885ab322a31541d17da102b047512b: 2024-11-22T15:24:54,858 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33885ab322a31541d17da102b047512b:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:24:54,858 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:54,858 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:24:54,858 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33885ab322a31541d17da102b047512b:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:24:54,858 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:54,858 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:24:54,858 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33885ab322a31541d17da102b047512b:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:24:54,858 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:24:54,859 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:24:54,859 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): 33885ab322a31541d17da102b047512b/B is initiating minor compaction (all files) 2024-11-22T15:24:54,859 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33885ab322a31541d17da102b047512b/B in TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:54,859 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/c8096ced47c241e69e0d8a67c3b2a0c1, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/9b02fd509ec448e3a8d7df374979827e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/14a989f59e144d38b0c7f1c0c31c64cb] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp, totalSize=35.3 K 2024-11-22T15:24:54,859 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110187 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:24:54,859 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): 33885ab322a31541d17da102b047512b/A is initiating minor compaction (all files) 2024-11-22T15:24:54,859 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33885ab322a31541d17da102b047512b/A in TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 
2024-11-22T15:24:54,859 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/9e08f9d8c7864999b84ee58a6236bfab, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/79976962067f42dfa7e29fe798bf87c9, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/f183745a497446bbb7ca8d48cae03878] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp, totalSize=107.6 K 2024-11-22T15:24:54,859 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:54,859 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. files: [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/9e08f9d8c7864999b84ee58a6236bfab, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/79976962067f42dfa7e29fe798bf87c9, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/f183745a497446bbb7ca8d48cae03878] 2024-11-22T15:24:54,859 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting c8096ced47c241e69e0d8a67c3b2a0c1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732289090624 2024-11-22T15:24:54,860 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9e08f9d8c7864999b84ee58a6236bfab, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732289090624 2024-11-22T15:24:54,860 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 9b02fd509ec448e3a8d7df374979827e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1732289090941 2024-11-22T15:24:54,860 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 79976962067f42dfa7e29fe798bf87c9, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1732289090941 2024-11-22T15:24:54,860 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 14a989f59e144d38b0c7f1c0c31c64cb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732289092091 2024-11-22T15:24:54,860 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting f183745a497446bbb7ca8d48cae03878, keycount=250, bloomtype=ROW, size=47.0 K, 
encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732289092091 2024-11-22T15:24:54,864 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:24:54,865 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33885ab322a31541d17da102b047512b#B#compaction#624 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:54,865 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411229ea5d3541e58404ebd79fdece360c584_33885ab322a31541d17da102b047512b store=[table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:24:54,865 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/2f26571ab594431ebdfad5f592caba21 is 50, key is test_row_0/B:col10/1732289093209/Put/seqid=0 2024-11-22T15:24:54,867 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411229ea5d3541e58404ebd79fdece360c584_33885ab322a31541d17da102b047512b, store=[table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:24:54,867 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411229ea5d3541e58404ebd79fdece360c584_33885ab322a31541d17da102b047512b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:24:54,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742551_1727 (size=4469) 2024-11-22T15:24:54,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742550_1726 (size=12241) 2024-11-22T15:24:54,896 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:54,897 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-22T15:24:54,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 
2024-11-22T15:24:54,897 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2837): Flushing 33885ab322a31541d17da102b047512b 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-22T15:24:54,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=A 2024-11-22T15:24:54,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:54,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=B 2024-11-22T15:24:54,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:54,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=C 2024-11-22T15:24:54,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:54,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122e286ec1702bd4c9a83011161d94ba833_33885ab322a31541d17da102b047512b is 50, key is test_row_0/A:col10/1732289093213/Put/seqid=0 2024-11-22T15:24:54,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742552_1728 (size=12154) 2024-11-22T15:24:55,108 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-22T15:24:55,108 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-22T15:24:55,276 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33885ab322a31541d17da102b047512b#A#compaction#625 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:55,277 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/78907f1e2c124299b6bf7162930a6a6f is 175, key is test_row_0/A:col10/1732289093209/Put/seqid=0 2024-11-22T15:24:55,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742553_1729 (size=31195) 2024-11-22T15:24:55,286 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/2f26571ab594431ebdfad5f592caba21 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/2f26571ab594431ebdfad5f592caba21 2024-11-22T15:24:55,290 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 33885ab322a31541d17da102b047512b/B of 33885ab322a31541d17da102b047512b into 2f26571ab594431ebdfad5f592caba21(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:24:55,290 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33885ab322a31541d17da102b047512b: 2024-11-22T15:24:55,290 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b., storeName=33885ab322a31541d17da102b047512b/B, priority=13, startTime=1732289094858; duration=0sec 2024-11-22T15:24:55,290 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:24:55,290 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33885ab322a31541d17da102b047512b:B 2024-11-22T15:24:55,290 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:24:55,291 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:24:55,291 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): 33885ab322a31541d17da102b047512b/C is initiating minor compaction (all files) 2024-11-22T15:24:55,291 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33885ab322a31541d17da102b047512b/C in TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 
2024-11-22T15:24:55,291 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/fdde57c11a5b44a0906807406844b9bf, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/3bf50a1ccc2644d78ad67e1eca1d4e81, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/1538c6a6db0347a1aae5d2d2f1e32000] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp, totalSize=35.3 K 2024-11-22T15:24:55,291 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting fdde57c11a5b44a0906807406844b9bf, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732289090624 2024-11-22T15:24:55,291 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 3bf50a1ccc2644d78ad67e1eca1d4e81, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1732289090941 2024-11-22T15:24:55,292 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 1538c6a6db0347a1aae5d2d2f1e32000, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732289092091 2024-11-22T15:24:55,296 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33885ab322a31541d17da102b047512b#C#compaction#627 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:55,296 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/718e449caab648989a65cb1c9dc9de8c is 50, key is test_row_0/C:col10/1732289093209/Put/seqid=0 2024-11-22T15:24:55,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742554_1730 (size=12241) 2024-11-22T15:24:55,302 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/718e449caab648989a65cb1c9dc9de8c as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/718e449caab648989a65cb1c9dc9de8c 2024-11-22T15:24:55,305 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 33885ab322a31541d17da102b047512b/C of 33885ab322a31541d17da102b047512b into 718e449caab648989a65cb1c9dc9de8c(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:24:55,305 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33885ab322a31541d17da102b047512b: 2024-11-22T15:24:55,305 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b., storeName=33885ab322a31541d17da102b047512b/C, priority=13, startTime=1732289094858; duration=0sec 2024-11-22T15:24:55,305 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:55,305 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33885ab322a31541d17da102b047512b:C 2024-11-22T15:24:55,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:55,327 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122e286ec1702bd4c9a83011161d94ba833_33885ab322a31541d17da102b047512b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122e286ec1702bd4c9a83011161d94ba833_33885ab322a31541d17da102b047512b 2024-11-22T15:24:55,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/08e3e94c594d40f6800b117668ef45c2, store: [table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:24:55,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/08e3e94c594d40f6800b117668ef45c2 is 175, key is test_row_0/A:col10/1732289093213/Put/seqid=0 2024-11-22T15:24:55,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742555_1731 (size=30955) 2024-11-22T15:24:55,334 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=128, memsize=13.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/08e3e94c594d40f6800b117668ef45c2 2024-11-22T15:24:55,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/2535c78af7d547418457b1686dfa8796 is 50, key is test_row_0/B:col10/1732289093213/Put/seqid=0 2024-11-22T15:24:55,341 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. as already flushing 2024-11-22T15:24:55,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on 33885ab322a31541d17da102b047512b 2024-11-22T15:24:55,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742556_1732 (size=12001) 2024-11-22T15:24:55,362 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:55,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289155360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:55,364 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:55,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289155361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:55,364 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:55,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289155362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:55,364 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:55,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289155363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:55,465 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:55,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289155463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:55,466 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:55,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289155464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:55,467 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:55,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289155465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:55,467 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:55,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289155465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:55,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-22T15:24:55,669 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:55,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289155667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:55,669 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:55,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289155667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:55,670 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:55,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289155669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:55,670 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:55,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289155669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:55,687 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/78907f1e2c124299b6bf7162930a6a6f as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/78907f1e2c124299b6bf7162930a6a6f 2024-11-22T15:24:55,691 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 33885ab322a31541d17da102b047512b/A of 33885ab322a31541d17da102b047512b into 78907f1e2c124299b6bf7162930a6a6f(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
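[Editor's illustrative aside] The repeated "Over memstore limit=512.0 K" warnings above are the region server refusing writes once the region's memstore passes its blocking threshold, which is the per-region flush size multiplied by the block multiplier. A minimal sketch of that relationship, assuming the standard HBase configuration keys and using hypothetical values chosen only so the product matches the 512 K limit seen in this log (this is not the test's actual configuration):

// Illustrative only: shows how the blocking threshold reported as
// "Over memstore limit=512.0 K" is derived from standard HBase settings.
// The concrete values here are assumptions, picked so flushSize * multiplier = 512 KB.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);  // hypothetical 128 KB flush trigger
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // block writes at 4x the flush size
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    // Prints "Writes blocked above ~512 K per region", matching the limit in the log above.
    System.out.println("Writes blocked above ~" + (blockingLimit / 1024) + " K per region");
  }
}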
2024-11-22T15:24:55,691 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33885ab322a31541d17da102b047512b: 2024-11-22T15:24:55,691 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b., storeName=33885ab322a31541d17da102b047512b/A, priority=13, startTime=1732289094858; duration=0sec 2024-11-22T15:24:55,691 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:55,691 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33885ab322a31541d17da102b047512b:A 2024-11-22T15:24:55,745 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/2535c78af7d547418457b1686dfa8796 2024-11-22T15:24:55,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/46e76416bdc1488785518b36fe61440e is 50, key is test_row_0/C:col10/1732289093213/Put/seqid=0 2024-11-22T15:24:55,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742557_1733 (size=12001) 2024-11-22T15:24:55,752 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/46e76416bdc1488785518b36fe61440e 2024-11-22T15:24:55,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/08e3e94c594d40f6800b117668ef45c2 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/08e3e94c594d40f6800b117668ef45c2 2024-11-22T15:24:55,765 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/08e3e94c594d40f6800b117668ef45c2, entries=150, sequenceid=128, filesize=30.2 K 2024-11-22T15:24:55,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/2535c78af7d547418457b1686dfa8796 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/2535c78af7d547418457b1686dfa8796 2024-11-22T15:24:55,768 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/2535c78af7d547418457b1686dfa8796, entries=150, sequenceid=128, filesize=11.7 K 2024-11-22T15:24:55,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/46e76416bdc1488785518b36fe61440e as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/46e76416bdc1488785518b36fe61440e 2024-11-22T15:24:55,772 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/46e76416bdc1488785518b36fe61440e, entries=150, sequenceid=128, filesize=11.7 K 2024-11-22T15:24:55,773 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for 33885ab322a31541d17da102b047512b in 875ms, sequenceid=128, compaction requested=false 2024-11-22T15:24:55,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2538): Flush status journal for 33885ab322a31541d17da102b047512b: 2024-11-22T15:24:55,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 
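[Editor's illustrative aside] While the flush above drains the memstore, the client keeps retrying the blocked Mutate calls; the RpcRetryingCallerImpl record further down (tries=6, retries=16, started=4141 ms ago) shows that retry loop at work. A minimal, illustrative writer, not the test's own code, showing the client-side retry knobs involved; the table name, row, and column family mirror values visible in the log, and everything else is assumed for the sake of a self-contained example:

// Illustrative sketch of a client put against the table seen in this log.
// Table.put() goes through the client's retrying caller, which transparently
// retries when the server answers with RegionTooBusyException; only after the
// configured retry budget is exhausted does an IOException reach this code.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.client.retries.number", 16);  // raised retry budget, as suggested by retries=16 in the log
    conf.setLong("hbase.client.pause", 100);         // base backoff between attempts, in milliseconds
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      // Column family "A" and qualifier "col10" appear in the flushed cells above; the value is arbitrary.
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      table.put(put);
    }
  }
}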
2024-11-22T15:24:55,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=170 2024-11-22T15:24:55,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=170 2024-11-22T15:24:55,775 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=170, resume processing ppid=169 2024-11-22T15:24:55,775 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, ppid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4000 sec 2024-11-22T15:24:55,776 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees in 2.4020 sec 2024-11-22T15:24:55,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on 33885ab322a31541d17da102b047512b 2024-11-22T15:24:55,974 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 33885ab322a31541d17da102b047512b 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-22T15:24:55,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=A 2024-11-22T15:24:55,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:55,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=B 2024-11-22T15:24:55,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:55,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=C 2024-11-22T15:24:55,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:55,981 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411224e125532676345789f21a97a10e3f85d_33885ab322a31541d17da102b047512b is 50, key is test_row_0/A:col10/1732289095973/Put/seqid=0 2024-11-22T15:24:55,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742558_1734 (size=14794) 2024-11-22T15:24:56,007 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:56,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289155978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:56,011 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:56,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289156007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:56,011 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:56,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289156007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:56,011 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:56,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289156007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:56,109 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:56,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289156108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:56,113 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:56,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289156112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:56,113 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:56,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289156112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:56,114 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:56,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289156112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:56,237 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:56,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40694 deadline: 1732289156235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:56,238 DEBUG [Thread-3039 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4141 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b., hostname=77927f992d0b,36033,1732288915809, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T15:24:56,310 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:56,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289156310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:56,315 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:56,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289156315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:56,315 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:56,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289156315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:56,317 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:56,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289156315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:56,384 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:56,386 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411224e125532676345789f21a97a10e3f85d_33885ab322a31541d17da102b047512b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411224e125532676345789f21a97a10e3f85d_33885ab322a31541d17da102b047512b 2024-11-22T15:24:56,387 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/872389ffff0645f898cdd74e0876a7fb, store: [table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:24:56,388 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/872389ffff0645f898cdd74e0876a7fb is 175, key is test_row_0/A:col10/1732289095973/Put/seqid=0 2024-11-22T15:24:56,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742559_1735 (size=39749) 2024-11-22T15:24:56,614 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:56,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289156612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:56,617 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:56,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289156616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:56,620 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:56,620 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:56,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289156618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:56,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289156618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:56,791 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=161, memsize=60.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/872389ffff0645f898cdd74e0876a7fb 2024-11-22T15:24:56,797 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/f76e3b2076d64fd49e3fb742334d47ee is 50, key is test_row_0/B:col10/1732289095973/Put/seqid=0 2024-11-22T15:24:56,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742560_1736 (size=12151) 2024-11-22T15:24:56,801 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=161 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/f76e3b2076d64fd49e3fb742334d47ee 2024-11-22T15:24:56,807 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/050ff3cb395e4a4eaa1b99912f1f1ce1 is 50, key is test_row_0/C:col10/1732289095973/Put/seqid=0 2024-11-22T15:24:56,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742561_1737 (size=12151) 2024-11-22T15:24:57,117 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:57,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289157116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:57,120 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:57,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289157119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:57,123 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:57,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289157122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:57,123 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:57,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289157122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:57,211 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=161 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/050ff3cb395e4a4eaa1b99912f1f1ce1 2024-11-22T15:24:57,214 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/872389ffff0645f898cdd74e0876a7fb as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/872389ffff0645f898cdd74e0876a7fb 2024-11-22T15:24:57,216 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/872389ffff0645f898cdd74e0876a7fb, entries=200, sequenceid=161, filesize=38.8 K 2024-11-22T15:24:57,217 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/f76e3b2076d64fd49e3fb742334d47ee as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/f76e3b2076d64fd49e3fb742334d47ee 2024-11-22T15:24:57,219 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/f76e3b2076d64fd49e3fb742334d47ee, entries=150, sequenceid=161, filesize=11.9 K 2024-11-22T15:24:57,220 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/050ff3cb395e4a4eaa1b99912f1f1ce1 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/050ff3cb395e4a4eaa1b99912f1f1ce1 2024-11-22T15:24:57,222 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/050ff3cb395e4a4eaa1b99912f1f1ce1, entries=150, sequenceid=161, filesize=11.9 K 2024-11-22T15:24:57,222 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=33.54 KB/34350 for 33885ab322a31541d17da102b047512b in 1248ms, sequenceid=161, compaction requested=true 2024-11-22T15:24:57,222 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 33885ab322a31541d17da102b047512b: 2024-11-22T15:24:57,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33885ab322a31541d17da102b047512b:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:24:57,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:57,222 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:24:57,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33885ab322a31541d17da102b047512b:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:24:57,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:57,222 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:24:57,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33885ab322a31541d17da102b047512b:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:24:57,223 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:24:57,223 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101899 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:24:57,223 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:24:57,223 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): 33885ab322a31541d17da102b047512b/B is initiating minor compaction (all files) 2024-11-22T15:24:57,223 DEBUG 
[RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): 33885ab322a31541d17da102b047512b/A is initiating minor compaction (all files) 2024-11-22T15:24:57,223 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33885ab322a31541d17da102b047512b/B in TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:57,223 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33885ab322a31541d17da102b047512b/A in TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:57,223 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/78907f1e2c124299b6bf7162930a6a6f, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/08e3e94c594d40f6800b117668ef45c2, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/872389ffff0645f898cdd74e0876a7fb] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp, totalSize=99.5 K 2024-11-22T15:24:57,223 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/2f26571ab594431ebdfad5f592caba21, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/2535c78af7d547418457b1686dfa8796, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/f76e3b2076d64fd49e3fb742334d47ee] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp, totalSize=35.5 K 2024-11-22T15:24:57,223 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:57,223 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 
files: [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/78907f1e2c124299b6bf7162930a6a6f, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/08e3e94c594d40f6800b117668ef45c2, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/872389ffff0645f898cdd74e0876a7fb] 2024-11-22T15:24:57,223 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 2f26571ab594431ebdfad5f592caba21, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732289092091 2024-11-22T15:24:57,223 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 78907f1e2c124299b6bf7162930a6a6f, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732289092091 2024-11-22T15:24:57,224 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 2535c78af7d547418457b1686dfa8796, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1732289093213 2024-11-22T15:24:57,224 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 08e3e94c594d40f6800b117668ef45c2, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1732289093213 2024-11-22T15:24:57,224 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 872389ffff0645f898cdd74e0876a7fb, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1732289095361 2024-11-22T15:24:57,224 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting f76e3b2076d64fd49e3fb742334d47ee, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1732289095361 2024-11-22T15:24:57,227 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:24:57,228 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33885ab322a31541d17da102b047512b#B#compaction#633 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:57,228 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/da28587e3b1346ce87bc87044d7c244e is 50, key is test_row_0/B:col10/1732289095973/Put/seqid=0 2024-11-22T15:24:57,229 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241122b0acb67d36da421e9b574a10402be51c_33885ab322a31541d17da102b047512b store=[table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:24:57,230 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241122b0acb67d36da421e9b574a10402be51c_33885ab322a31541d17da102b047512b, store=[table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:24:57,230 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122b0acb67d36da421e9b574a10402be51c_33885ab322a31541d17da102b047512b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:24:57,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742562_1738 (size=12493) 2024-11-22T15:24:57,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742563_1739 (size=4469) 2024-11-22T15:24:57,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-22T15:24:57,478 INFO [Thread-3041 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 169 completed 2024-11-22T15:24:57,479 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:24:57,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees 2024-11-22T15:24:57,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-22T15:24:57,480 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:24:57,480 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:24:57,481 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE; 
org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:24:57,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-22T15:24:57,632 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:57,632 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-22T15:24:57,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:57,632 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2837): Flushing 33885ab322a31541d17da102b047512b 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-22T15:24:57,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=A 2024-11-22T15:24:57,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:57,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=B 2024-11-22T15:24:57,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:57,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=C 2024-11-22T15:24:57,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:57,635 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33885ab322a31541d17da102b047512b#A#compaction#634 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:57,635 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/2c7dbdad8c7a44cbb4184a2c799182b7 is 175, key is test_row_0/A:col10/1732289095973/Put/seqid=0 2024-11-22T15:24:57,638 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/da28587e3b1346ce87bc87044d7c244e as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/da28587e3b1346ce87bc87044d7c244e 2024-11-22T15:24:57,643 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 33885ab322a31541d17da102b047512b/B of 33885ab322a31541d17da102b047512b into da28587e3b1346ce87bc87044d7c244e(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:24:57,643 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33885ab322a31541d17da102b047512b: 2024-11-22T15:24:57,643 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b., storeName=33885ab322a31541d17da102b047512b/B, priority=13, startTime=1732289097222; duration=0sec 2024-11-22T15:24:57,643 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:24:57,643 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33885ab322a31541d17da102b047512b:B 2024-11-22T15:24:57,643 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:24:57,645 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:24:57,645 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): 33885ab322a31541d17da102b047512b/C is initiating minor compaction (all files) 2024-11-22T15:24:57,645 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33885ab322a31541d17da102b047512b/C in TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 
2024-11-22T15:24:57,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122965496cce0684383ab18a5bbe7b9619a_33885ab322a31541d17da102b047512b is 50, key is test_row_0/A:col10/1732289095980/Put/seqid=0 2024-11-22T15:24:57,646 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/718e449caab648989a65cb1c9dc9de8c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/46e76416bdc1488785518b36fe61440e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/050ff3cb395e4a4eaa1b99912f1f1ce1] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp, totalSize=35.5 K 2024-11-22T15:24:57,646 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 718e449caab648989a65cb1c9dc9de8c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732289092091 2024-11-22T15:24:57,647 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 46e76416bdc1488785518b36fe61440e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1732289093213 2024-11-22T15:24:57,649 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 050ff3cb395e4a4eaa1b99912f1f1ce1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1732289095361 2024-11-22T15:24:57,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742564_1740 (size=31447) 2024-11-22T15:24:57,664 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33885ab322a31541d17da102b047512b#C#compaction#636 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:24:57,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742565_1741 (size=12304) 2024-11-22T15:24:57,665 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/de7d071def2a40cd9f6ada397bd9d9e5 is 50, key is test_row_0/C:col10/1732289095973/Put/seqid=0 2024-11-22T15:24:57,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742566_1742 (size=12493) 2024-11-22T15:24:57,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-22T15:24:58,064 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/2c7dbdad8c7a44cbb4184a2c799182b7 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/2c7dbdad8c7a44cbb4184a2c799182b7 2024-11-22T15:24:58,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:58,068 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122965496cce0684383ab18a5bbe7b9619a_33885ab322a31541d17da102b047512b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122965496cce0684383ab18a5bbe7b9619a_33885ab322a31541d17da102b047512b 2024-11-22T15:24:58,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/cf5af5d41e824675bf6d5da3d7fd3d52, store: [table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:24:58,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/cf5af5d41e824675bf6d5da3d7fd3d52 is 175, key is test_row_0/A:col10/1732289095980/Put/seqid=0 2024-11-22T15:24:58,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742567_1743 (size=31105) 2024-11-22T15:24:58,073 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed 
compaction of 3 (all) file(s) in 33885ab322a31541d17da102b047512b/A of 33885ab322a31541d17da102b047512b into 2c7dbdad8c7a44cbb4184a2c799182b7(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:24:58,073 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33885ab322a31541d17da102b047512b: 2024-11-22T15:24:58,073 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b., storeName=33885ab322a31541d17da102b047512b/A, priority=13, startTime=1732289097222; duration=0sec 2024-11-22T15:24:58,073 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:58,073 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33885ab322a31541d17da102b047512b:A 2024-11-22T15:24:58,074 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=169, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/cf5af5d41e824675bf6d5da3d7fd3d52 2024-11-22T15:24:58,074 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/de7d071def2a40cd9f6ada397bd9d9e5 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/de7d071def2a40cd9f6ada397bd9d9e5 2024-11-22T15:24:58,078 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 33885ab322a31541d17da102b047512b/C of 33885ab322a31541d17da102b047512b into de7d071def2a40cd9f6ada397bd9d9e5(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:24:58,078 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33885ab322a31541d17da102b047512b: 2024-11-22T15:24:58,079 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b., storeName=33885ab322a31541d17da102b047512b/C, priority=13, startTime=1732289097222; duration=0sec 2024-11-22T15:24:58,079 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:24:58,079 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33885ab322a31541d17da102b047512b:C 2024-11-22T15:24:58,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/0a4b0b132eb14ae7a4ea67618fda11a1 is 50, key is test_row_0/B:col10/1732289095980/Put/seqid=0 2024-11-22T15:24:58,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742568_1744 (size=12151) 2024-11-22T15:24:58,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-22T15:24:58,125 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. as already flushing 2024-11-22T15:24:58,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on 33885ab322a31541d17da102b047512b 2024-11-22T15:24:58,144 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:58,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:58,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289158141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:58,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289158142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:58,144 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:58,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289158142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:58,146 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:58,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289158144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:58,246 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:58,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289158245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:58,247 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:58,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289158245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:58,247 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:58,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289158245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:58,248 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:58,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289158247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:58,451 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:58,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289158449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:58,451 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:58,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289158449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:58,451 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:58,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289158449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:58,452 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:58,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289158449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:58,483 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/0a4b0b132eb14ae7a4ea67618fda11a1 2024-11-22T15:24:58,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/768abbacde7e4074b21f474cd77ac626 is 50, key is test_row_0/C:col10/1732289095980/Put/seqid=0 2024-11-22T15:24:58,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742569_1745 (size=12151) 2024-11-22T15:24:58,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-22T15:24:58,753 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:58,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289158752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:58,753 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:58,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289158752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:58,754 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:58,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289158753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:58,755 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:58,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289158754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:58,892 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/768abbacde7e4074b21f474cd77ac626 2024-11-22T15:24:58,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/cf5af5d41e824675bf6d5da3d7fd3d52 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/cf5af5d41e824675bf6d5da3d7fd3d52 2024-11-22T15:24:58,898 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/cf5af5d41e824675bf6d5da3d7fd3d52, entries=150, sequenceid=169, filesize=30.4 K 2024-11-22T15:24:58,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/0a4b0b132eb14ae7a4ea67618fda11a1 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/0a4b0b132eb14ae7a4ea67618fda11a1 2024-11-22T15:24:58,900 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/0a4b0b132eb14ae7a4ea67618fda11a1, entries=150, sequenceid=169, filesize=11.9 K 2024-11-22T15:24:58,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/768abbacde7e4074b21f474cd77ac626 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/768abbacde7e4074b21f474cd77ac626 2024-11-22T15:24:58,903 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/768abbacde7e4074b21f474cd77ac626, entries=150, sequenceid=169, filesize=11.9 K 2024-11-22T15:24:58,903 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=167.72 KB/171750 for 33885ab322a31541d17da102b047512b in 1271ms, sequenceid=169, compaction requested=false 2024-11-22T15:24:58,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2538): Flush status journal for 33885ab322a31541d17da102b047512b: 2024-11-22T15:24:58,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:58,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=172 2024-11-22T15:24:58,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=172 2024-11-22T15:24:58,904 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=172, resume processing ppid=171 2024-11-22T15:24:58,905 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4230 sec 2024-11-22T15:24:58,905 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees in 1.4260 sec 2024-11-22T15:24:59,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on 33885ab322a31541d17da102b047512b 2024-11-22T15:24:59,257 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 33885ab322a31541d17da102b047512b 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-11-22T15:24:59,257 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=A 2024-11-22T15:24:59,257 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:59,257 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=B 2024-11-22T15:24:59,257 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:59,257 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
33885ab322a31541d17da102b047512b, store=C 2024-11-22T15:24:59,257 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:24:59,261 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122ef97f16746ff4fd4870588616cb4f36c_33885ab322a31541d17da102b047512b is 50, key is test_row_0/A:col10/1732289098143/Put/seqid=0 2024-11-22T15:24:59,263 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:59,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289159260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:59,263 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:59,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289159260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:59,263 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:59,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289159260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:59,263 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:59,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289159261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:59,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742570_1746 (size=12304) 2024-11-22T15:24:59,365 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:59,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289159364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:59,365 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:59,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289159364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:59,365 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:59,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289159364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:59,365 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:59,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289159364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:59,567 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:59,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289159566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:59,568 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:59,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289159566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:59,568 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:59,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289159567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:59,569 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:59,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289159567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:59,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-22T15:24:59,587 INFO [Thread-3041 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 171 completed 2024-11-22T15:24:59,588 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:24:59,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees 2024-11-22T15:24:59,589 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:24:59,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-22T15:24:59,589 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:24:59,589 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=174, ppid=173, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:24:59,665 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:24:59,667 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122ef97f16746ff4fd4870588616cb4f36c_33885ab322a31541d17da102b047512b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122ef97f16746ff4fd4870588616cb4f36c_33885ab322a31541d17da102b047512b 2024-11-22T15:24:59,668 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/ec18676388fe43cf91fcc3fd5f31aa40, store: [table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:24:59,668 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/ec18676388fe43cf91fcc3fd5f31aa40 is 175, key is test_row_0/A:col10/1732289098143/Put/seqid=0 2024-11-22T15:24:59,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742571_1747 (size=31105) 2024-11-22T15:24:59,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-22T15:24:59,740 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:59,741 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-22T15:24:59,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:59,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. as already flushing 2024-11-22T15:24:59,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:59,741 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:59,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:59,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:59,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:59,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289159869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:59,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:59,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289159869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:59,871 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:59,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289159870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:59,871 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:24:59,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289159870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:24:59,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-22T15:24:59,892 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:24:59,893 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-22T15:24:59,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:59,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. as already flushing 2024-11-22T15:24:59,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:24:59,893 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:59,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:24:59,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:25:00,045 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:25:00,045 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-22T15:25:00,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:25:00,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. as already flushing 2024-11-22T15:25:00,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:25:00,045 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:25:00,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:25:00,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:25:00,071 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=201, memsize=58.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/ec18676388fe43cf91fcc3fd5f31aa40 2024-11-22T15:25:00,077 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/965d28c13e0448b6a4045bd7a98b218e is 50, key is test_row_0/B:col10/1732289098143/Put/seqid=0 2024-11-22T15:25:00,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742572_1748 (size=12151) 2024-11-22T15:25:00,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-22T15:25:00,199 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:25:00,199 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-22T15:25:00,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:25:00,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. as already flushing 2024-11-22T15:25:00,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:25:00,199 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:25:00,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:25:00,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:25:00,270 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:00,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40694 deadline: 1732289160269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:00,270 DEBUG [Thread-3039 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8174 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b., hostname=77927f992d0b,36033,1732288915809, seqNum=5, see https://s.apache.org/timeout, 
exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at 
org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T15:25:00,351 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:25:00,351 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-22T15:25:00,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:25:00,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. as already flushing 2024-11-22T15:25:00,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:25:00,352 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:25:00,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:25:00,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:25:00,373 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:00,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289160372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:00,375 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:00,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289160374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:00,376 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:00,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289160375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:00,377 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:00,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289160376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:00,480 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=201 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/965d28c13e0448b6a4045bd7a98b218e 2024-11-22T15:25:00,485 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/c2d27e9d14be491183e8bd4705b872d7 is 50, key is test_row_0/C:col10/1732289098143/Put/seqid=0 2024-11-22T15:25:00,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742573_1749 (size=12151) 2024-11-22T15:25:00,503 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:25:00,503 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-22T15:25:00,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:25:00,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. as already flushing 2024-11-22T15:25:00,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 
2024-11-22T15:25:00,504 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:25:00,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:25:00,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:25:00,656 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:25:00,656 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-22T15:25:00,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:25:00,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. as already flushing 2024-11-22T15:25:00,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:25:00,656 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:25:00,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:25:00,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:25:00,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-22T15:25:00,808 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:25:00,808 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-22T15:25:00,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:25:00,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. as already flushing 2024-11-22T15:25:00,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:25:00,809 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:25:00,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:25:00,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:25:00,888 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=201 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/c2d27e9d14be491183e8bd4705b872d7 2024-11-22T15:25:00,891 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/ec18676388fe43cf91fcc3fd5f31aa40 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/ec18676388fe43cf91fcc3fd5f31aa40 2024-11-22T15:25:00,894 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/ec18676388fe43cf91fcc3fd5f31aa40, entries=150, sequenceid=201, filesize=30.4 K 2024-11-22T15:25:00,895 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/965d28c13e0448b6a4045bd7a98b218e as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/965d28c13e0448b6a4045bd7a98b218e 2024-11-22T15:25:00,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,899 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,902 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,905 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,907 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,907 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/965d28c13e0448b6a4045bd7a98b218e, entries=150, sequenceid=201, filesize=11.9 K 2024-11-22T15:25:00,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,908 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/c2d27e9d14be491183e8bd4705b872d7 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/c2d27e9d14be491183e8bd4705b872d7 2024-11-22T15:25:00,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,912 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/c2d27e9d14be491183e8bd4705b872d7, entries=150, sequenceid=201, filesize=11.9 K 2024-11-22T15:25:00,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,913 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for 33885ab322a31541d17da102b047512b in 1657ms, sequenceid=201, compaction requested=true 2024-11-22T15:25:00,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,913 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 33885ab322a31541d17da102b047512b: 2024-11-22T15:25:00,913 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33885ab322a31541d17da102b047512b:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:25:00,913 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:25:00,913 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:25:00,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,913 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33885ab322a31541d17da102b047512b:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:25:00,913 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:25:00,913 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:25:00,913 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33885ab322a31541d17da102b047512b:C, priority=-2147483648, current under compaction store size is 3 
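Editor's note: the flush above finishes with compaction requested=true, and the "3 eligible, 16 blocking" bookkeeping shows why: once a store accumulates enough flushed files it queues a minor compaction, and past a much larger blocking threshold it would stall further writes. A minimal standalone sketch of that decision, assuming the stock defaults for hbase.hstore.compaction.min and hbase.hstore.blockingStoreFiles (3 and 16):

/** Illustrative only: mirrors the "3 eligible, 16 blocking" bookkeeping in the flush log above. */
public class CompactionTriggerSketch {
    // Assumed defaults, normally read from hbase.hstore.compaction.min and
    // hbase.hstore.blockingStoreFiles; the values here are the stock defaults.
    static final int MIN_FILES_TO_COMPACT = 3;
    static final int BLOCKING_STORE_FILES = 16;

    static boolean needsCompaction(int storeFileCount, int filesCompacting) {
        // A store asks for a minor compaction once enough flushed files pile up.
        return storeFileCount - filesCompacting >= MIN_FILES_TO_COMPACT;
    }

    static boolean writesShouldStall(int storeFileCount) {
        // Past the blocking threshold the region delays further writes/flushes.
        return storeFileCount >= BLOCKING_STORE_FILES;
    }

    public static void main(String[] args) {
        System.out.println(needsCompaction(3, 0)); // true: the case logged above (3 files, 0 compacting)
        System.out.println(writesShouldStall(3));  // false: well under the blocking limit of 16
    }
}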
2024-11-22T15:25:00,914 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:25:00,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,915 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:25:00,915 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93657 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:25:00,915 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): 33885ab322a31541d17da102b047512b/A is initiating minor compaction (all files) 2024-11-22T15:25:00,915 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): 33885ab322a31541d17da102b047512b/B is initiating minor compaction (all files) 2024-11-22T15:25:00,915 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33885ab322a31541d17da102b047512b/B in TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:25:00,916 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33885ab322a31541d17da102b047512b/A in TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 
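Editor's note: the "selected 3 files of size 36795 ... with 1 in ratio" lines reflect the exploring policy's size-ratio test: a candidate set qualifies only if no single file exceeds the combined size of the other files times the compaction ratio. A rough sketch of that test, assuming the default ratio of 1.2 (hbase.hstore.compaction.ratio) and file sizes approximating the B-store selection above:

import java.util.List;

/** Rough sketch of the size-ratio test an exploring-style selection applies to candidate files. */
public class RatioCheckSketch {
    // Assumed default for hbase.hstore.compaction.ratio; real runs read it from configuration.
    static final double COMPACTION_RATIO = 1.2;

    /** A selection is "in ratio" when no single file dwarfs the rest of the set. */
    static boolean inRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Sizes roughly matching the B-store selection above (three files totalling 36795 bytes).
        System.out.println(inRatio(List.of(12_493L, 12_151L, 12_151L), COMPACTION_RATIO)); // true
        // A selection dominated by one large file would be rejected.
        System.out.println(inRatio(List.of(100_000L, 1_000L, 1_000L), COMPACTION_RATIO));  // false
    }
}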
2024-11-22T15:25:00,916 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/2c7dbdad8c7a44cbb4184a2c799182b7, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/cf5af5d41e824675bf6d5da3d7fd3d52, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/ec18676388fe43cf91fcc3fd5f31aa40] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp, totalSize=91.5 K 2024-11-22T15:25:00,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,916 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/da28587e3b1346ce87bc87044d7c244e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/0a4b0b132eb14ae7a4ea67618fda11a1, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/965d28c13e0448b6a4045bd7a98b218e] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp, totalSize=35.9 K 2024-11-22T15:25:00,916 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:25:00,916 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 
files: [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/2c7dbdad8c7a44cbb4184a2c799182b7, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/cf5af5d41e824675bf6d5da3d7fd3d52, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/ec18676388fe43cf91fcc3fd5f31aa40] 2024-11-22T15:25:00,916 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting da28587e3b1346ce87bc87044d7c244e, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1732289095361 2024-11-22T15:25:00,916 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2c7dbdad8c7a44cbb4184a2c799182b7, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1732289095361 2024-11-22T15:25:00,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,916 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 0a4b0b132eb14ae7a4ea67618fda11a1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732289095975 2024-11-22T15:25:00,916 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting cf5af5d41e824675bf6d5da3d7fd3d52, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732289095975 2024-11-22T15:25:00,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,916 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 965d28c13e0448b6a4045bd7a98b218e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1732289098140 2024-11-22T15:25:00,916 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting ec18676388fe43cf91fcc3fd5f31aa40, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1732289098140 2024-11-22T15:25:00,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,923 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:25:00,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,926 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33885ab322a31541d17da102b047512b#B#compaction#643 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:25:00,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,926 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/502062493ce6457ebcfe89fd5cefe1aa is 50, key is test_row_0/B:col10/1732289098143/Put/seqid=0 2024-11-22T15:25:00,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,932 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,936 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241122e2ace86e9a0e49fab0bd6c3dbbd33732_33885ab322a31541d17da102b047512b store=[table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:25:00,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,938 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241122e2ace86e9a0e49fab0bd6c3dbbd33732_33885ab322a31541d17da102b047512b, store=[table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:25:00,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,938 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting 
writer for hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122e2ace86e9a0e49fab0bd6c3dbbd33732_33885ab322a31541d17da102b047512b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:25:00,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742574_1750 (size=12595) 2024-11-22T15:25:00,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:00,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,952 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/502062493ce6457ebcfe89fd5cefe1aa as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/502062493ce6457ebcfe89fd5cefe1aa 2024-11-22T15:25:00,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:00,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,955 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 33885ab322a31541d17da102b047512b/B of 33885ab322a31541d17da102b047512b into 502062493ce6457ebcfe89fd5cefe1aa(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:25:00,956 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33885ab322a31541d17da102b047512b: 2024-11-22T15:25:00,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,956 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b., storeName=33885ab322a31541d17da102b047512b/B, priority=13, startTime=1732289100913; duration=0sec 2024-11-22T15:25:00,956 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:25:00,956 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33885ab322a31541d17da102b047512b:B 2024-11-22T15:25:00,956 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:25:00,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,956 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:25:00,957 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): 33885ab322a31541d17da102b047512b/C is initiating minor compaction (all files) 2024-11-22T15:25:00,957 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33885ab322a31541d17da102b047512b/C in TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 
2024-11-22T15:25:00,957 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/de7d071def2a40cd9f6ada397bd9d9e5, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/768abbacde7e4074b21f474cd77ac626, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/c2d27e9d14be491183e8bd4705b872d7] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp, totalSize=35.9 K 2024-11-22T15:25:00,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,957 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting de7d071def2a40cd9f6ada397bd9d9e5, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1732289095361 2024-11-22T15:25:00,957 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 768abbacde7e4074b21f474cd77ac626, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732289095975 2024-11-22T15:25:00,958 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting c2d27e9d14be491183e8bd4705b872d7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1732289098140 2024-11-22T15:25:00,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,960 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:25:00,960 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 
2024-11-22T15:25:00,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:25:00,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,961 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2837): Flushing 33885ab322a31541d17da102b047512b 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-22T15:25:00,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=A 2024-11-22T15:25:00,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:25:00,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=B 2024-11-22T15:25:00,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:25:00,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=C 2024-11-22T15:25:00,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:25:00,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,968 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33885ab322a31541d17da102b047512b#C#compaction#644 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:25:00,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,969 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/2d722333ccb547d8aec912aaf78bef9c is 50, key is test_row_0/C:col10/1732289098143/Put/seqid=0 2024-11-22T15:25:00,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742575_1751 (size=4469) 2024-11-22T15:25:00,972 INFO 
[RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33885ab322a31541d17da102b047512b#A#compaction#642 average throughput is 0.50 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:25:00,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,972 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/58275b4105ec46728d3e1f8b583eb541 is 175, key is test_row_0/A:col10/1732289098143/Put/seqid=0 2024-11-22T15:25:00,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:00,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411225d544a019bd046a4ad6428f5a2fa6ed1_33885ab322a31541d17da102b047512b is 50, key is test_row_0/A:col10/1732289099260/Put/seqid=0 2024-11-22T15:25:01,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742577_1753 (size=31549) 2024-11-22T15:25:01,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,016 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/58275b4105ec46728d3e1f8b583eb541 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/58275b4105ec46728d3e1f8b583eb541 
2024-11-22T15:25:01,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,021 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 33885ab322a31541d17da102b047512b/A of 33885ab322a31541d17da102b047512b into 58275b4105ec46728d3e1f8b583eb541(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:25:01,021 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33885ab322a31541d17da102b047512b: 2024-11-22T15:25:01,021 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b., storeName=33885ab322a31541d17da102b047512b/A, priority=13, startTime=1732289100913; duration=0sec 2024-11-22T15:25:01,021 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:25:01,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,021 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33885ab322a31541d17da102b047512b:A 2024-11-22T15:25:01,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742576_1752 (size=12595) 2024-11-22T15:25:01,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742578_1754 (size=9814) 2024-11-22T15:25:01,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[previous message repeated by RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 (port 36033) between 15:25:01,096 and 15:25:01,158; duplicate entries omitted]
2024-11-22T15:25:01,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
[... identical DEBUG entries from storefiletracker.StoreFileTrackerFactory(122) ("instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker") repeat continuously from 15:25:01,190 to 15:25:01,256 on RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2, queue=0, port=36033 ...]
2024-11-22T15:25:01,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... identical "instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" DEBUG entries from RpcServer.default.FPBQ.Fifo handlers 0-2 (queue=0, port=36033), logged continuously from 15:25:01,297 through 15:25:01,371, elided ...]
2024-11-22T15:25:01,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:01,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
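[editor's note] The repeated StoreFileTrackerFactory(122) entries above show the region server resolving the configured StoreFileTracker implementation and ending up at DefaultStoreFileTracker for each store it touches. A minimal, hypothetical sketch of how that choice is normally expressed is given below; the property name "hbase.store.file-tracker.impl" and the FILE value are recalled from the HBase store file tracking feature (HBASE-26067) and are assumptions to verify against the HBase version in use, not something stated in this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class StoreFileTrackerConfigSketch {
  public static void main(String[] args) {
    // Cluster-wide default tracker. The property name is an assumption
    // (HBASE-26067); check the docs for the HBase release actually deployed.
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.store.file-tracker.impl", "DEFAULT");

    // Per-table override: the same key can be carried on the table descriptor,
    // which is what the factory consults when it instantiates a tracker per store.
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("A"))
        .setValue("hbase.store.file-tracker.impl", "FILE")
        .build();

    System.out.println(desc.getValue("hbase.store.file-tracker.impl"));
  }
}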
2024-11-22T15:25:01,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,389 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. as already flushing 2024-11-22T15:25:01,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on 33885ab322a31541d17da102b047512b 2024-11-22T15:25:01,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,438 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/2d722333ccb547d8aec912aaf78bef9c as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/2d722333ccb547d8aec912aaf78bef9c 2024-11-22T15:25:01,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:01,440 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:01,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289161434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:01,441 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:01,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289161436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:01,442 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:01,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289161436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:01,442 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:01,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289161436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:01,443 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 33885ab322a31541d17da102b047512b/C of 33885ab322a31541d17da102b047512b into 2d722333ccb547d8aec912aaf78bef9c(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:25:01,443 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33885ab322a31541d17da102b047512b: 2024-11-22T15:25:01,443 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b., storeName=33885ab322a31541d17da102b047512b/C, priority=13, startTime=1732289100913; duration=0sec 2024-11-22T15:25:01,443 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:25:01,444 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33885ab322a31541d17da102b047512b:C 2024-11-22T15:25:01,451 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411225d544a019bd046a4ad6428f5a2fa6ed1_33885ab322a31541d17da102b047512b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411225d544a019bd046a4ad6428f5a2fa6ed1_33885ab322a31541d17da102b047512b 2024-11-22T15:25:01,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/8b06aa277e814e39a1821d1123159bce, store: [table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:25:01,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/8b06aa277e814e39a1821d1123159bce is 175, key is test_row_0/A:col10/1732289099260/Put/seqid=0 2024-11-22T15:25:01,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742579_1755 (size=22461) 2024-11-22T15:25:01,475 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=209, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/8b06aa277e814e39a1821d1123159bce 2024-11-22T15:25:01,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/102b615823f141ddbecd2c175c0c45f0 is 50, key is test_row_0/B:col10/1732289099260/Put/seqid=0 2024-11-22T15:25:01,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742580_1756 (size=9757) 2024-11-22T15:25:01,506 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/102b615823f141ddbecd2c175c0c45f0 2024-11-22T15:25:01,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/80e85f0ca6ac4e5ca52a72e9dc52fa5a is 50, key is test_row_0/C:col10/1732289099260/Put/seqid=0 2024-11-22T15:25:01,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742581_1757 (size=9757) 2024-11-22T15:25:01,543 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:01,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289161542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:01,543 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:01,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289161542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:01,544 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:01,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289161543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:01,545 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:01,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289161543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:01,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-22T15:25:01,744 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:01,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289161744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:01,747 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:01,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289161745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:01,747 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:01,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289161746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:01,747 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:01,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289161746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:01,917 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/80e85f0ca6ac4e5ca52a72e9dc52fa5a 2024-11-22T15:25:01,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/8b06aa277e814e39a1821d1123159bce as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/8b06aa277e814e39a1821d1123159bce 2024-11-22T15:25:01,922 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/8b06aa277e814e39a1821d1123159bce, entries=100, sequenceid=209, filesize=21.9 K 2024-11-22T15:25:01,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/102b615823f141ddbecd2c175c0c45f0 as 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/102b615823f141ddbecd2c175c0c45f0 2024-11-22T15:25:01,925 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/102b615823f141ddbecd2c175c0c45f0, entries=100, sequenceid=209, filesize=9.5 K 2024-11-22T15:25:01,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/80e85f0ca6ac4e5ca52a72e9dc52fa5a as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/80e85f0ca6ac4e5ca52a72e9dc52fa5a 2024-11-22T15:25:01,929 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/80e85f0ca6ac4e5ca52a72e9dc52fa5a, entries=100, sequenceid=209, filesize=9.5 K 2024-11-22T15:25:01,929 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=181.14 KB/185490 for 33885ab322a31541d17da102b047512b in 968ms, sequenceid=209, compaction requested=false 2024-11-22T15:25:01,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2538): Flush status journal for 33885ab322a31541d17da102b047512b: 2024-11-22T15:25:01,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 
2024-11-22T15:25:01,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=174 2024-11-22T15:25:01,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=174 2024-11-22T15:25:01,931 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=174, resume processing ppid=173 2024-11-22T15:25:01,931 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=174, ppid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3410 sec 2024-11-22T15:25:01,931 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees in 2.3430 sec 2024-11-22T15:25:02,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on 33885ab322a31541d17da102b047512b 2024-11-22T15:25:02,048 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 33885ab322a31541d17da102b047512b 3/3 column families, dataSize=187.85 KB heapSize=492.94 KB 2024-11-22T15:25:02,049 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=A 2024-11-22T15:25:02,049 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:25:02,049 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=B 2024-11-22T15:25:02,049 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:25:02,049 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=C 2024-11-22T15:25:02,049 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:25:02,051 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:02,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289162049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:02,054 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122f5549da637e14b0599434b11a9dbbea1_33885ab322a31541d17da102b047512b is 50, key is test_row_0/A:col10/1732289102048/Put/seqid=0 2024-11-22T15:25:02,054 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:02,054 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:02,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289162052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:02,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289162051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:02,054 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:02,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289162052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:02,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742582_1758 (size=12304) 2024-11-22T15:25:02,156 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:02,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289162155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:02,156 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:02,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289162155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:02,156 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:02,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289162155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:02,359 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:02,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289162357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:02,359 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:02,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289162357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:02,359 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:02,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289162358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:02,458 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,461 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122f5549da637e14b0599434b11a9dbbea1_33885ab322a31541d17da102b047512b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122f5549da637e14b0599434b11a9dbbea1_33885ab322a31541d17da102b047512b 2024-11-22T15:25:02,462 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/ca5af8c360e542db8d35a668e048c31a, store: [table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:25:02,462 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/ca5af8c360e542db8d35a668e048c31a is 175, key is test_row_0/A:col10/1732289102048/Put/seqid=0 2024-11-22T15:25:02,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742583_1759 (size=31105) 2024-11-22T15:25:02,473 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=242, memsize=62.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/ca5af8c360e542db8d35a668e048c31a 2024-11-22T15:25:02,480 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/b61fd135b1b847f8a7cb1ab59714cad5 is 50, key is test_row_0/B:col10/1732289102048/Put/seqid=0 2024-11-22T15:25:02,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742584_1760 
(size=12151) 2024-11-22T15:25:02,483 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=242 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/b61fd135b1b847f8a7cb1ab59714cad5 2024-11-22T15:25:02,488 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/33f6cf1e1975462ab5391cc9bf55a8e6 is 50, key is test_row_0/C:col10/1732289102048/Put/seqid=0 2024-11-22T15:25:02,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742585_1761 (size=12151) 2024-11-22T15:25:02,491 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=242 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/33f6cf1e1975462ab5391cc9bf55a8e6 2024-11-22T15:25:02,493 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/ca5af8c360e542db8d35a668e048c31a as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/ca5af8c360e542db8d35a668e048c31a 2024-11-22T15:25:02,496 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/ca5af8c360e542db8d35a668e048c31a, entries=150, sequenceid=242, filesize=30.4 K 2024-11-22T15:25:02,497 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/b61fd135b1b847f8a7cb1ab59714cad5 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/b61fd135b1b847f8a7cb1ab59714cad5 2024-11-22T15:25:02,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,502 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/b61fd135b1b847f8a7cb1ab59714cad5, entries=150, sequenceid=242, filesize=11.9 K 2024-11-22T15:25:02,503 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/33f6cf1e1975462ab5391cc9bf55a8e6 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/33f6cf1e1975462ab5391cc9bf55a8e6 2024-11-22T15:25:02,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,506 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/33f6cf1e1975462ab5391cc9bf55a8e6, entries=150, sequenceid=242, filesize=11.9 K 2024-11-22T15:25:02,506 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,506 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~187.85 KB/192360, heapSize ~492.89 KB/504720, currentSize=20.13 KB/20610 for 33885ab322a31541d17da102b047512b in 458ms, sequenceid=242, compaction requested=true 2024-11-22T15:25:02,506 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 33885ab322a31541d17da102b047512b: 2024-11-22T15:25:02,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,507 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33885ab322a31541d17da102b047512b:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:25:02,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,507 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:25:02,507 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:25:02,507 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33885ab322a31541d17da102b047512b:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:25:02,507 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:25:02,507 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:25:02,507 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33885ab322a31541d17da102b047512b:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:25:02,507 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:25:02,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,507 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85115 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:25:02,507 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34503 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:25:02,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,508 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): 33885ab322a31541d17da102b047512b/B is initiating minor compaction (all files) 2024-11-22T15:25:02,508 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): 33885ab322a31541d17da102b047512b/A is initiating minor compaction (all files) 2024-11-22T15:25:02,508 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33885ab322a31541d17da102b047512b/A in TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:25:02,508 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33885ab322a31541d17da102b047512b/B in TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 
2024-11-22T15:25:02,508 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/58275b4105ec46728d3e1f8b583eb541, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/8b06aa277e814e39a1821d1123159bce, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/ca5af8c360e542db8d35a668e048c31a] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp, totalSize=83.1 K 2024-11-22T15:25:02,508 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/502062493ce6457ebcfe89fd5cefe1aa, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/102b615823f141ddbecd2c175c0c45f0, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/b61fd135b1b847f8a7cb1ab59714cad5] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp, totalSize=33.7 K 2024-11-22T15:25:02,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,508 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:25:02,508 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 
files: [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/58275b4105ec46728d3e1f8b583eb541, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/8b06aa277e814e39a1821d1123159bce, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/ca5af8c360e542db8d35a668e048c31a] 2024-11-22T15:25:02,508 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 502062493ce6457ebcfe89fd5cefe1aa, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1732289098140 2024-11-22T15:25:02,508 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 58275b4105ec46728d3e1f8b583eb541, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1732289098140 2024-11-22T15:25:02,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,508 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 102b615823f141ddbecd2c175c0c45f0, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732289099259 2024-11-22T15:25:02,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,508 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8b06aa277e814e39a1821d1123159bce, keycount=100, bloomtype=ROW, size=21.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732289099259 2024-11-22T15:25:02,508 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting b61fd135b1b847f8a7cb1ab59714cad5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=242, earliestPutTs=1732289101434 2024-11-22T15:25:02,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,508 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting ca5af8c360e542db8d35a668e048c31a, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=242, earliestPutTs=1732289101434 2024-11-22T15:25:02,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,513 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:25:02,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,514 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,515 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33885ab322a31541d17da102b047512b#B#compaction#652 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:25:02,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,515 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/f018b8da8d244d7f92630c8b677db084 is 50, key is test_row_0/B:col10/1732289102048/Put/seqid=0 2024-11-22T15:25:02,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,516 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411223c41dad6c5744c0ab1410b4a4e99758d_33885ab322a31541d17da102b047512b store=[table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:25:02,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,517 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,517 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411223c41dad6c5744c0ab1410b4a4e99758d_33885ab322a31541d17da102b047512b, store=[table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:25:02,517 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411223c41dad6c5744c0ab1410b4a4e99758d_33885ab322a31541d17da102b047512b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:25:02,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:02,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:02,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:02,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:02,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:02,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-22T15:25:02,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742586_1762 (size=12697) 2024-11-22T15:25:02,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742587_1763 (size=4469) 2024-11-22T15:25:02,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,559 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33885ab322a31541d17da102b047512b#A#compaction#651 average throughput is 0.53 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:25:02,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,560 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/f91d1f1f924b436d8411e0ddb85396df is 175, key is test_row_0/A:col10/1732289102048/Put/seqid=0 2024-11-22T15:25:02,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,564 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,567 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,569 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,576 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742588_1764 (size=31651) 2024-11-22T15:25:02,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,580 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 33885ab322a31541d17da102b047512b 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-22T15:25:02,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=A 2024-11-22T15:25:02,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:25:02,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=B 2024-11-22T15:25:02,581 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:25:02,581 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=C 2024-11-22T15:25:02,581 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:25:02,581 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on 33885ab322a31541d17da102b047512b 2024-11-22T15:25:02,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,605 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122cc4c8f4c7a0742a2b617217abc0d4cc7_33885ab322a31541d17da102b047512b is 50, key is test_row_0/A:col10/1732289102569/Put/seqid=0 2024-11-22T15:25:02,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742589_1765 (size=19774) 2024-11-22T15:25:02,612 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:02,615 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122cc4c8f4c7a0742a2b617217abc0d4cc7_33885ab322a31541d17da102b047512b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122cc4c8f4c7a0742a2b617217abc0d4cc7_33885ab322a31541d17da102b047512b 2024-11-22T15:25:02,616 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/0236bf0de2c742b7875d997b7fe10228, store: [table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:25:02,616 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/0236bf0de2c742b7875d997b7fe10228 is 175, key is test_row_0/A:col10/1732289102569/Put/seqid=0 2024-11-22T15:25:02,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742590_1766 (size=57033) 2024-11-22T15:25:02,622 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=253, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/0236bf0de2c742b7875d997b7fe10228 2024-11-22T15:25:02,633 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/a57573e40e78407db5f7522c8dc4c8d0 is 50, key is test_row_0/B:col10/1732289102569/Put/seqid=0 2024-11-22T15:25:02,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742591_1767 (size=12151) 2024-11-22T15:25:02,665 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:02,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289162661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:02,668 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:02,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289162663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:02,668 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:02,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289162664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:02,669 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:02,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289162665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:02,772 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:02,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289162769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:02,772 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:02,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289162769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:02,774 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:02,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289162770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:02,945 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/f018b8da8d244d7f92630c8b677db084 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/f018b8da8d244d7f92630c8b677db084 2024-11-22T15:25:02,948 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 33885ab322a31541d17da102b047512b/B of 33885ab322a31541d17da102b047512b into f018b8da8d244d7f92630c8b677db084(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:25:02,948 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33885ab322a31541d17da102b047512b: 2024-11-22T15:25:02,948 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b., storeName=33885ab322a31541d17da102b047512b/B, priority=13, startTime=1732289102507; duration=0sec 2024-11-22T15:25:02,949 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:25:02,949 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33885ab322a31541d17da102b047512b:B 2024-11-22T15:25:02,949 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:25:02,949 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34503 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:25:02,949 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): 33885ab322a31541d17da102b047512b/C is initiating minor compaction (all files) 2024-11-22T15:25:02,949 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33885ab322a31541d17da102b047512b/C in TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:25:02,950 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/2d722333ccb547d8aec912aaf78bef9c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/80e85f0ca6ac4e5ca52a72e9dc52fa5a, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/33f6cf1e1975462ab5391cc9bf55a8e6] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp, totalSize=33.7 K 2024-11-22T15:25:02,950 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 2d722333ccb547d8aec912aaf78bef9c, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1732289098140 2024-11-22T15:25:02,951 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 80e85f0ca6ac4e5ca52a72e9dc52fa5a, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732289099259 2024-11-22T15:25:02,951 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 33f6cf1e1975462ab5391cc9bf55a8e6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=242, earliestPutTs=1732289101434 2024-11-22T15:25:02,960 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
33885ab322a31541d17da102b047512b#C#compaction#655 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:25:02,960 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/09868501b4864c78b385926418fd77e8 is 50, key is test_row_0/C:col10/1732289102048/Put/seqid=0 2024-11-22T15:25:02,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742592_1768 (size=12697) 2024-11-22T15:25:02,975 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:02,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289162974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:02,976 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:02,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289162974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:02,978 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:02,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289162976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:02,978 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/09868501b4864c78b385926418fd77e8 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/09868501b4864c78b385926418fd77e8 2024-11-22T15:25:02,982 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/f91d1f1f924b436d8411e0ddb85396df as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/f91d1f1f924b436d8411e0ddb85396df 2024-11-22T15:25:02,987 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 33885ab322a31541d17da102b047512b/C of 33885ab322a31541d17da102b047512b into 09868501b4864c78b385926418fd77e8(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:25:02,987 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 33885ab322a31541d17da102b047512b/A of 33885ab322a31541d17da102b047512b into f91d1f1f924b436d8411e0ddb85396df(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:25:02,987 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33885ab322a31541d17da102b047512b: 2024-11-22T15:25:02,987 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33885ab322a31541d17da102b047512b: 2024-11-22T15:25:02,987 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b., storeName=33885ab322a31541d17da102b047512b/C, priority=13, startTime=1732289102507; duration=0sec 2024-11-22T15:25:02,987 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b., storeName=33885ab322a31541d17da102b047512b/A, priority=13, startTime=1732289102507; duration=0sec 2024-11-22T15:25:02,987 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:25:02,987 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:25:02,987 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33885ab322a31541d17da102b047512b:A 2024-11-22T15:25:02,987 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33885ab322a31541d17da102b047512b:C 2024-11-22T15:25:03,045 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/a57573e40e78407db5f7522c8dc4c8d0 2024-11-22T15:25:03,062 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/d0648ce00f004e05a9d14de8af776c39 is 50, key is test_row_0/C:col10/1732289102569/Put/seqid=0 2024-11-22T15:25:03,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742593_1769 (size=12151) 2024-11-22T15:25:03,105 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/d0648ce00f004e05a9d14de8af776c39 2024-11-22T15:25:03,111 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/0236bf0de2c742b7875d997b7fe10228 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/0236bf0de2c742b7875d997b7fe10228 2024-11-22T15:25:03,115 INFO [MemStoreFlusher.0 
{}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/0236bf0de2c742b7875d997b7fe10228, entries=300, sequenceid=253, filesize=55.7 K 2024-11-22T15:25:03,116 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/a57573e40e78407db5f7522c8dc4c8d0 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/a57573e40e78407db5f7522c8dc4c8d0 2024-11-22T15:25:03,121 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/a57573e40e78407db5f7522c8dc4c8d0, entries=150, sequenceid=253, filesize=11.9 K 2024-11-22T15:25:03,121 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/d0648ce00f004e05a9d14de8af776c39 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/d0648ce00f004e05a9d14de8af776c39 2024-11-22T15:25:03,125 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/d0648ce00f004e05a9d14de8af776c39, entries=150, sequenceid=253, filesize=11.9 K 2024-11-22T15:25:03,126 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 33885ab322a31541d17da102b047512b in 546ms, sequenceid=253, compaction requested=false 2024-11-22T15:25:03,126 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 33885ab322a31541d17da102b047512b: 2024-11-22T15:25:03,175 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 33885ab322a31541d17da102b047512b 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-22T15:25:03,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=A 2024-11-22T15:25:03,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:25:03,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=B 2024-11-22T15:25:03,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:25:03,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=C 2024-11-22T15:25:03,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:25:03,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on 33885ab322a31541d17da102b047512b 
2024-11-22T15:25:03,187 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122aceefd1f968e4be3a6497dde984dab5c_33885ab322a31541d17da102b047512b is 50, key is test_row_0/A:col10/1732289103172/Put/seqid=0 2024-11-22T15:25:03,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742594_1770 (size=14994) 2024-11-22T15:25:03,251 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:03,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289163246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:03,279 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:03,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289163277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:03,280 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:03,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289163278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:03,281 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:03,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289163280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:03,353 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:03,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289163352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:03,557 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:03,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289163555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:03,650 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:03,659 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122aceefd1f968e4be3a6497dde984dab5c_33885ab322a31541d17da102b047512b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122aceefd1f968e4be3a6497dde984dab5c_33885ab322a31541d17da102b047512b 2024-11-22T15:25:03,660 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/a5dc9625a15a43238925077e94de5136, store: [table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:25:03,661 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/a5dc9625a15a43238925077e94de5136 is 175, key is test_row_0/A:col10/1732289103172/Put/seqid=0 2024-11-22T15:25:03,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-22T15:25:03,693 INFO [Thread-3041 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, 
procId: 173 completed 2024-11-22T15:25:03,694 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:25:03,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=175, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees 2024-11-22T15:25:03,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742595_1771 (size=39949) 2024-11-22T15:25:03,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-22T15:25:03,696 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=175, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:25:03,696 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=175, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:25:03,696 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=176, ppid=175, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:25:03,785 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:03,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289163783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:03,787 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:03,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289163785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:03,787 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:03,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289163785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:03,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-22T15:25:03,847 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:25:03,848 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-22T15:25:03,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:25:03,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. as already flushing 2024-11-22T15:25:03,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:25:03,848 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:25:03,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:25:03,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:25:03,860 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:03,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289163859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:03,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-22T15:25:04,000 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:25:04,001 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-22T15:25:04,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 
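The repeated RegionTooBusyException warnings above mean the region's memstore has grown past its blocking size, so new Mutate calls are rejected until the in-flight flush drains it. The blocking size is the memstore flush size multiplied by hbase.hregion.memstore.block.multiplier; the 512.0 K figure implies the test shrinks the flush size far below the production default of 128 MB (an assumption, since the test's configuration is not part of this log). The HBase client normally retries this exception on its own; a minimal hand-rolled retry would look roughly like the sketch below, where the table, family, and row names are taken from the log and the backoff values are purely illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetrySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;                          // illustrative starting backoff
          for (int attempt = 1; attempt <= 5; attempt++) {
            try {
              table.put(put);                            // the client also retries internally
              break;
            } catch (RegionTooBusyException e) {
              // Memstore above its blocking limit: wait for the flush to catch up, then retry.
              Thread.sleep(backoffMs);
              backoffMs *= 2;
            }
          }
        }
      }
    }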
2024-11-22T15:25:04,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. as already flushing 2024-11-22T15:25:04,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:25:04,002 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:25:04,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:25:04,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
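pid=176 above is a flush procedure that the master dispatches to the region server. Because the region is still in the middle of the memstore flush started earlier, FlushRegionCallable logs "NOT flushing ... as already flushing", the handler wraps that into an IOException, and the master keeps re-dispatching the procedure until the running flush completes. A flush of this kind can be requested from client code through the Admin API, roughly as sketched below (it is an assumption that this particular procedure originated from the test's own flush request; the caller is not visible in this log).

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushRequestSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Asks the master to flush every region of the table; the region server may
          // reject and retry the remote call (as above) while an earlier flush is running.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }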
2024-11-22T15:25:04,096 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=282, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/a5dc9625a15a43238925077e94de5136 2024-11-22T15:25:04,120 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/ff3c70de7e064deab6cb79d75dcca0ed is 50, key is test_row_0/B:col10/1732289103172/Put/seqid=0 2024-11-22T15:25:04,154 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:25:04,154 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-22T15:25:04,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:25:04,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. as already flushing 2024-11-22T15:25:04,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:25:04,155 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:25:04,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:25:04,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
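The DefaultMobStoreFlusher and DefaultMobStoreCompactor messages indicate that column family A is MOB-enabled, which is why its flushes and compactions go through the MOB-specific code paths, while B and C are ordinary families. A table with that shape could be declared roughly as below; the MOB threshold is illustrative, since the test's actual descriptor is not shown in this log.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobTableSketch {
      public static TableDescriptor descriptor() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                .setMobEnabled(true)          // values above the threshold are written to MOB files
                .setMobThreshold(4L * 1024)   // illustrative threshold, not the test's real value
                .build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"))
            .build();
      }
    }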
2024-11-22T15:25:04,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742596_1772 (size=12301) 2024-11-22T15:25:04,162 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=282 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/ff3c70de7e064deab6cb79d75dcca0ed 2024-11-22T15:25:04,171 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/10a556caceee4f24987c0c9b55aac680 is 50, key is test_row_0/C:col10/1732289103172/Put/seqid=0 2024-11-22T15:25:04,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742597_1773 (size=12301) 2024-11-22T15:25:04,204 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=282 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/10a556caceee4f24987c0c9b55aac680 2024-11-22T15:25:04,211 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/a5dc9625a15a43238925077e94de5136 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/a5dc9625a15a43238925077e94de5136 2024-11-22T15:25:04,215 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/a5dc9625a15a43238925077e94de5136, entries=200, sequenceid=282, filesize=39.0 K 2024-11-22T15:25:04,226 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/ff3c70de7e064deab6cb79d75dcca0ed as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/ff3c70de7e064deab6cb79d75dcca0ed 2024-11-22T15:25:04,232 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/ff3c70de7e064deab6cb79d75dcca0ed, entries=150, sequenceid=282, filesize=12.0 K 2024-11-22T15:25:04,234 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/10a556caceee4f24987c0c9b55aac680 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/10a556caceee4f24987c0c9b55aac680 2024-11-22T15:25:04,238 INFO [MemStoreFlusher.0 
{}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/10a556caceee4f24987c0c9b55aac680, entries=150, sequenceid=282, filesize=12.0 K 2024-11-22T15:25:04,239 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 33885ab322a31541d17da102b047512b in 1064ms, sequenceid=282, compaction requested=true 2024-11-22T15:25:04,239 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 33885ab322a31541d17da102b047512b: 2024-11-22T15:25:04,239 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33885ab322a31541d17da102b047512b:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:25:04,239 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:25:04,239 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:25:04,239 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33885ab322a31541d17da102b047512b:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:25:04,239 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:25:04,239 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33885ab322a31541d17da102b047512b:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:25:04,239 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:25:04,239 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:25:04,240 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:25:04,240 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): 33885ab322a31541d17da102b047512b/B is initiating minor compaction (all files) 2024-11-22T15:25:04,240 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33885ab322a31541d17da102b047512b/B in TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 
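Once the flush finishes (~154 KB written as one new HFile per family), each of A, B, and C holds three store files, which is what triggers the "Add compact mark" and "Small Compaction requested" lines for all three stores. The "16 blocking" figure matches the stock hbase.hstore.blockingStoreFiles, and three new files per store is exactly the default minimum for a minor compaction. The values below are the defaults that would produce this behavior (an assumption, as the test may override them).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThresholdSketch {
      public static Configuration defaults() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compactionThreshold", 3);  // min store files before a minor compaction
        conf.setInt("hbase.hstore.compaction.max", 10);      // max files folded into one compaction
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);  // the "16 blocking" figure in the log
        return conf;
      }
    }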
2024-11-22T15:25:04,241 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/f018b8da8d244d7f92630c8b677db084, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/a57573e40e78407db5f7522c8dc4c8d0, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/ff3c70de7e064deab6cb79d75dcca0ed] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp, totalSize=36.3 K 2024-11-22T15:25:04,241 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 128633 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:25:04,241 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): 33885ab322a31541d17da102b047512b/A is initiating minor compaction (all files) 2024-11-22T15:25:04,241 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33885ab322a31541d17da102b047512b/A in TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:25:04,241 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/f91d1f1f924b436d8411e0ddb85396df, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/0236bf0de2c742b7875d997b7fe10228, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/a5dc9625a15a43238925077e94de5136] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp, totalSize=125.6 K 2024-11-22T15:25:04,241 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:25:04,241 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 
files: [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/f91d1f1f924b436d8411e0ddb85396df, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/0236bf0de2c742b7875d997b7fe10228, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/a5dc9625a15a43238925077e94de5136] 2024-11-22T15:25:04,242 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting f018b8da8d244d7f92630c8b677db084, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=242, earliestPutTs=1732289101434 2024-11-22T15:25:04,242 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting f91d1f1f924b436d8411e0ddb85396df, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=242, earliestPutTs=1732289101434 2024-11-22T15:25:04,243 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting a57573e40e78407db5f7522c8dc4c8d0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732289102050 2024-11-22T15:25:04,243 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0236bf0de2c742b7875d997b7fe10228, keycount=300, bloomtype=ROW, size=55.7 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732289102050 2024-11-22T15:25:04,243 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting ff3c70de7e064deab6cb79d75dcca0ed, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1732289102642 2024-11-22T15:25:04,243 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting a5dc9625a15a43238925077e94de5136, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1732289102642 2024-11-22T15:25:04,255 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:25:04,259 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241122f5c9f660fcfa4d3eb43f8612716df393_33885ab322a31541d17da102b047512b store=[table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:25:04,260 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33885ab322a31541d17da102b047512b#B#compaction#661 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:25:04,261 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/e9914bdeb9c341dba66ea1b19812d2dc is 50, key is test_row_0/B:col10/1732289103172/Put/seqid=0 2024-11-22T15:25:04,261 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241122f5c9f660fcfa4d3eb43f8612716df393_33885ab322a31541d17da102b047512b, store=[table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:25:04,261 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122f5c9f660fcfa4d3eb43f8612716df393_33885ab322a31541d17da102b047512b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:25:04,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742598_1774 (size=4469) 2024-11-22T15:25:04,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742599_1775 (size=12949) 2024-11-22T15:25:04,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-22T15:25:04,307 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:25:04,307 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-22T15:25:04,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 
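The "Exploring compaction algorithm has selected 3 files" lines come from ExploringCompactionPolicy, which only accepts a set of files if no single file is larger than hbase.hstore.compaction.ratio (default 1.2) times the combined size of the rest. The sketch below replays that check on the three B-store files listed above (roughly 12.4 K, 11.9 K, and 12.0 K); it is a simplified illustration of the rule, not the actual HBase implementation.

    public class RatioCheckSketch {
      public static void main(String[] args) {
        // Approximate sizes (bytes) of the three B-family HFiles picked for compaction.
        long[] sizes = {12_700, 12_190, 12_301};
        double ratio = 1.2;                       // hbase.hstore.compaction.ratio default
        long total = 0;
        for (long s : sizes) total += s;
        boolean eligible = true;
        for (long s : sizes) {
          eligible &= s <= ratio * (total - s);   // each file vs. the sum of the others
        }
        System.out.println("set accepted for minor compaction: " + eligible);  // true
      }
    }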
2024-11-22T15:25:04,308 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2837): Flushing 33885ab322a31541d17da102b047512b 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-22T15:25:04,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=A 2024-11-22T15:25:04,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:25:04,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=B 2024-11-22T15:25:04,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:25:04,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=C 2024-11-22T15:25:04,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:25:04,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411227606e87e042d4233867bced36c16bdf9_33885ab322a31541d17da102b047512b is 50, key is test_row_0/A:col10/1732289103196/Put/seqid=0 2024-11-22T15:25:04,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742600_1776 (size=12454) 2024-11-22T15:25:04,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on 33885ab322a31541d17da102b047512b 2024-11-22T15:25:04,366 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. as already flushing 2024-11-22T15:25:04,462 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:04,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289164459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:04,564 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:04,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289164563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:04,675 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33885ab322a31541d17da102b047512b#A#compaction#660 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:25:04,676 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/6859d05e60c54ec0a7e9b49cc5249c1e is 175, key is test_row_0/A:col10/1732289103172/Put/seqid=0 2024-11-22T15:25:04,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742601_1777 (size=31903) 2024-11-22T15:25:04,688 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/e9914bdeb9c341dba66ea1b19812d2dc as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/e9914bdeb9c341dba66ea1b19812d2dc 2024-11-22T15:25:04,691 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/6859d05e60c54ec0a7e9b49cc5249c1e as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/6859d05e60c54ec0a7e9b49cc5249c1e 2024-11-22T15:25:04,692 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 33885ab322a31541d17da102b047512b/B of 33885ab322a31541d17da102b047512b into e9914bdeb9c341dba66ea1b19812d2dc(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:25:04,692 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33885ab322a31541d17da102b047512b: 2024-11-22T15:25:04,692 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b., storeName=33885ab322a31541d17da102b047512b/B, priority=13, startTime=1732289104239; duration=0sec 2024-11-22T15:25:04,692 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:25:04,692 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33885ab322a31541d17da102b047512b:B 2024-11-22T15:25:04,692 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:25:04,693 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:25:04,693 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): 33885ab322a31541d17da102b047512b/C is initiating minor compaction (all files) 2024-11-22T15:25:04,693 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33885ab322a31541d17da102b047512b/C in TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:25:04,693 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/09868501b4864c78b385926418fd77e8, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/d0648ce00f004e05a9d14de8af776c39, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/10a556caceee4f24987c0c9b55aac680] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp, totalSize=36.3 K 2024-11-22T15:25:04,694 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 09868501b4864c78b385926418fd77e8, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=242, earliestPutTs=1732289101434 2024-11-22T15:25:04,694 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting d0648ce00f004e05a9d14de8af776c39, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732289102050 2024-11-22T15:25:04,694 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 10a556caceee4f24987c0c9b55aac680, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1732289102642 2024-11-22T15:25:04,696 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 
(all) file(s) in 33885ab322a31541d17da102b047512b/A of 33885ab322a31541d17da102b047512b into 6859d05e60c54ec0a7e9b49cc5249c1e(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:25:04,696 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33885ab322a31541d17da102b047512b: 2024-11-22T15:25:04,696 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b., storeName=33885ab322a31541d17da102b047512b/A, priority=13, startTime=1732289104239; duration=0sec 2024-11-22T15:25:04,696 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:25:04,696 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33885ab322a31541d17da102b047512b:A 2024-11-22T15:25:04,700 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33885ab322a31541d17da102b047512b#C#compaction#663 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:25:04,701 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/eee2ba456aa246c2a516245db264685e is 50, key is test_row_0/C:col10/1732289103172/Put/seqid=0 2024-11-22T15:25:04,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742602_1778 (size=12949) 2024-11-22T15:25:04,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:04,732 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411227606e87e042d4233867bced36c16bdf9_33885ab322a31541d17da102b047512b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411227606e87e042d4233867bced36c16bdf9_33885ab322a31541d17da102b047512b 2024-11-22T15:25:04,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/127f83de7da44532aad18e7bff44781c, store: [table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:25:04,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest 
cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/127f83de7da44532aad18e7bff44781c is 175, key is test_row_0/A:col10/1732289103196/Put/seqid=0 2024-11-22T15:25:04,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742603_1779 (size=31255) 2024-11-22T15:25:04,756 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=292, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/127f83de7da44532aad18e7bff44781c 2024-11-22T15:25:04,766 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:04,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289164766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:04,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/4950f4b834af4e3f89cd1935e594a44e is 50, key is test_row_0/B:col10/1732289103196/Put/seqid=0 2024-11-22T15:25:04,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742604_1780 (size=12301) 2024-11-22T15:25:04,791 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:04,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289164789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:04,793 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:04,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289164791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:04,795 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/4950f4b834af4e3f89cd1935e594a44e 2024-11-22T15:25:04,797 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:04,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289164794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:04,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-22T15:25:04,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/9b2f5674bd504e1daab621f948d7d95c is 50, key is test_row_0/C:col10/1732289103196/Put/seqid=0 2024-11-22T15:25:04,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742605_1781 (size=12301) 2024-11-22T15:25:05,070 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:05,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289165069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:05,108 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/eee2ba456aa246c2a516245db264685e as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/eee2ba456aa246c2a516245db264685e 2024-11-22T15:25:05,111 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 33885ab322a31541d17da102b047512b/C of 33885ab322a31541d17da102b047512b into eee2ba456aa246c2a516245db264685e(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:25:05,111 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33885ab322a31541d17da102b047512b: 2024-11-22T15:25:05,111 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b., storeName=33885ab322a31541d17da102b047512b/C, priority=13, startTime=1732289104239; duration=0sec 2024-11-22T15:25:05,111 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:25:05,111 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33885ab322a31541d17da102b047512b:C 2024-11-22T15:25:05,217 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/9b2f5674bd504e1daab621f948d7d95c 2024-11-22T15:25:05,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/127f83de7da44532aad18e7bff44781c as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/127f83de7da44532aad18e7bff44781c 2024-11-22T15:25:05,222 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/127f83de7da44532aad18e7bff44781c, entries=150, sequenceid=292, filesize=30.5 K 2024-11-22T15:25:05,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/4950f4b834af4e3f89cd1935e594a44e as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/4950f4b834af4e3f89cd1935e594a44e 2024-11-22T15:25:05,225 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/4950f4b834af4e3f89cd1935e594a44e, entries=150, sequenceid=292, filesize=12.0 K 2024-11-22T15:25:05,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/9b2f5674bd504e1daab621f948d7d95c as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/9b2f5674bd504e1daab621f948d7d95c 2024-11-22T15:25:05,228 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/9b2f5674bd504e1daab621f948d7d95c, entries=150, sequenceid=292, filesize=12.0 K 2024-11-22T15:25:05,228 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 33885ab322a31541d17da102b047512b in 920ms, sequenceid=292, compaction requested=false 2024-11-22T15:25:05,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2538): Flush status journal for 33885ab322a31541d17da102b047512b: 2024-11-22T15:25:05,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:25:05,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=176 2024-11-22T15:25:05,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=176 2024-11-22T15:25:05,230 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=176, resume processing ppid=175 2024-11-22T15:25:05,230 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=176, ppid=175, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5330 sec 2024-11-22T15:25:05,231 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=175, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees in 1.5360 sec 2024-11-22T15:25:05,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on 33885ab322a31541d17da102b047512b 2024-11-22T15:25:05,588 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 33885ab322a31541d17da102b047512b 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-22T15:25:05,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=A 2024-11-22T15:25:05,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:25:05,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=B 2024-11-22T15:25:05,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:25:05,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
33885ab322a31541d17da102b047512b, store=C 2024-11-22T15:25:05,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:25:05,594 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411229fb341dc795f44d6b3255ba41c277d2c_33885ab322a31541d17da102b047512b is 50, key is test_row_0/A:col10/1732289104454/Put/seqid=0 2024-11-22T15:25:05,603 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:05,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289165602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:05,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742606_1782 (size=14994) 2024-11-22T15:25:05,705 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:05,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289165704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:05,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-22T15:25:05,800 INFO [Thread-3041 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 175 completed 2024-11-22T15:25:05,801 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-22T15:25:05,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=177, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees 2024-11-22T15:25:05,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-22T15:25:05,803 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=177, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-22T15:25:05,803 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=177, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T15:25:05,803 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T15:25:05,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-22T15:25:05,907 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:05,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289165906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:05,954 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:25:05,955 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-22T15:25:05,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:25:05,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. as already flushing 2024-11-22T15:25:05,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:25:05,955 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:25:05,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:25:05,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:25:06,016 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:06,019 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411229fb341dc795f44d6b3255ba41c277d2c_33885ab322a31541d17da102b047512b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411229fb341dc795f44d6b3255ba41c277d2c_33885ab322a31541d17da102b047512b 2024-11-22T15:25:06,019 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/48fe1b62bf8e4f7390c602ad4a47ef88, store: [table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:25:06,020 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/48fe1b62bf8e4f7390c602ad4a47ef88 is 175, key is test_row_0/A:col10/1732289104454/Put/seqid=0 2024-11-22T15:25:06,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742607_1783 (size=39949) 2024-11-22T15:25:06,023 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=322, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/48fe1b62bf8e4f7390c602ad4a47ef88 2024-11-22T15:25:06,029 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/ea4d4a2d1151439ea9c293991b408f3d is 50, key is test_row_0/B:col10/1732289104454/Put/seqid=0 2024-11-22T15:25:06,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742608_1784 (size=12301) 2024-11-22T15:25:06,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-22T15:25:06,106 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New 
admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:25:06,107 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-22T15:25:06,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:25:06,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. as already flushing 2024-11-22T15:25:06,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:25:06,107 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:25:06,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:25:06,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:25:06,210 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:06,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289166209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:06,259 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:25:06,259 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-22T15:25:06,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:25:06,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. as already flushing 2024-11-22T15:25:06,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:25:06,259 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:25:06,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:25:06,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:25:06,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-22T15:25:06,411 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:25:06,411 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-22T15:25:06,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:25:06,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. as already flushing 2024-11-22T15:25:06,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:25:06,411 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:25:06,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:25:06,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:25:06,445 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=322 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/ea4d4a2d1151439ea9c293991b408f3d 2024-11-22T15:25:06,450 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/5f980da182c644e89b2c764d94789abd is 50, key is test_row_0/C:col10/1732289104454/Put/seqid=0 2024-11-22T15:25:06,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742609_1785 (size=12301) 2024-11-22T15:25:06,562 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:25:06,563 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-22T15:25:06,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:25:06,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. as already flushing 2024-11-22T15:25:06,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:25:06,563 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:25:06,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:25:06,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:25:06,714 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:25:06,715 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-22T15:25:06,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:25:06,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. as already flushing 2024-11-22T15:25:06,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:25:06,715 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:25:06,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:25:06,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:25:06,716 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:06,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289166715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:06,803 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:06,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40666 deadline: 1732289166802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:06,804 DEBUG [Thread-3031 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4139 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b., hostname=77927f992d0b,36033,1732288915809, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T15:25:06,806 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:06,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40710 deadline: 1732289166805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:06,807 DEBUG [Thread-3033 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4142 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b., hostname=77927f992d0b,36033,1732288915809, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T15:25:06,812 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:06,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40716 deadline: 1732289166811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:06,812 DEBUG [Thread-3037 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4149 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b., hostname=77927f992d0b,36033,1732288915809, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T15:25:06,853 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=322 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/5f980da182c644e89b2c764d94789abd 2024-11-22T15:25:06,856 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/48fe1b62bf8e4f7390c602ad4a47ef88 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/48fe1b62bf8e4f7390c602ad4a47ef88 2024-11-22T15:25:06,860 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/48fe1b62bf8e4f7390c602ad4a47ef88, entries=200, sequenceid=322, filesize=39.0 K 2024-11-22T15:25:06,860 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/ea4d4a2d1151439ea9c293991b408f3d as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/ea4d4a2d1151439ea9c293991b408f3d 2024-11-22T15:25:06,864 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/ea4d4a2d1151439ea9c293991b408f3d, entries=150, sequenceid=322, filesize=12.0 K 2024-11-22T15:25:06,864 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/5f980da182c644e89b2c764d94789abd as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/5f980da182c644e89b2c764d94789abd 2024-11-22T15:25:06,866 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:25:06,867 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-22T15:25:06,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:25:06,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 
as already flushing 2024-11-22T15:25:06,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:25:06,867 ERROR [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T15:25:06,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:25:06,867 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/5f980da182c644e89b2c764d94789abd, entries=150, sequenceid=322, filesize=12.0 K 2024-11-22T15:25:06,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T15:25:06,868 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 33885ab322a31541d17da102b047512b in 1280ms, sequenceid=322, compaction requested=true 2024-11-22T15:25:06,868 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 33885ab322a31541d17da102b047512b: 2024-11-22T15:25:06,868 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:25:06,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33885ab322a31541d17da102b047512b:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:25:06,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:25:06,869 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:25:06,869 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103107 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:25:06,869 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): 33885ab322a31541d17da102b047512b/A is initiating minor compaction (all files) 2024-11-22T15:25:06,869 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33885ab322a31541d17da102b047512b/A in TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:25:06,869 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/6859d05e60c54ec0a7e9b49cc5249c1e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/127f83de7da44532aad18e7bff44781c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/48fe1b62bf8e4f7390c602ad4a47ef88] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp, totalSize=100.7 K 2024-11-22T15:25:06,869 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:25:06,869 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 
files: [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/6859d05e60c54ec0a7e9b49cc5249c1e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/127f83de7da44532aad18e7bff44781c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/48fe1b62bf8e4f7390c602ad4a47ef88] 2024-11-22T15:25:06,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33885ab322a31541d17da102b047512b:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:25:06,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:25:06,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33885ab322a31541d17da102b047512b:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:25:06,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:25:06,870 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6859d05e60c54ec0a7e9b49cc5249c1e, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1732289102642 2024-11-22T15:25:06,870 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 127f83de7da44532aad18e7bff44781c, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1732289103196 2024-11-22T15:25:06,870 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:25:06,870 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 48fe1b62bf8e4f7390c602ad4a47ef88, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=322, earliestPutTs=1732289104450 2024-11-22T15:25:06,870 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): 33885ab322a31541d17da102b047512b/B is initiating minor compaction (all files) 2024-11-22T15:25:06,870 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33885ab322a31541d17da102b047512b/B in TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 
2024-11-22T15:25:06,870 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/e9914bdeb9c341dba66ea1b19812d2dc, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/4950f4b834af4e3f89cd1935e594a44e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/ea4d4a2d1151439ea9c293991b408f3d] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp, totalSize=36.7 K 2024-11-22T15:25:06,871 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting e9914bdeb9c341dba66ea1b19812d2dc, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1732289102642 2024-11-22T15:25:06,871 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 4950f4b834af4e3f89cd1935e594a44e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1732289103196 2024-11-22T15:25:06,871 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting ea4d4a2d1151439ea9c293991b408f3d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=322, earliestPutTs=1732289104450 2024-11-22T15:25:06,875 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:25:06,877 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411224db7bcc6f88e4f40ac212bd04c39b84d_33885ab322a31541d17da102b047512b store=[table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:25:06,878 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411224db7bcc6f88e4f40ac212bd04c39b84d_33885ab322a31541d17da102b047512b, store=[table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:25:06,878 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33885ab322a31541d17da102b047512b#B#compaction#670 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:25:06,878 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411224db7bcc6f88e4f40ac212bd04c39b84d_33885ab322a31541d17da102b047512b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:25:06,879 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/3785dbf790ad4941b6e04868695b69fd is 50, key is test_row_0/B:col10/1732289104454/Put/seqid=0 2024-11-22T15:25:06,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742610_1786 (size=4469) 2024-11-22T15:25:06,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742611_1787 (size=13051) 2024-11-22T15:25:06,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-22T15:25:07,019 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:25:07,019 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36033 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-22T15:25:07,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 
2024-11-22T15:25:07,019 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2837): Flushing 33885ab322a31541d17da102b047512b 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-22T15:25:07,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=A 2024-11-22T15:25:07,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:25:07,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=B 2024-11-22T15:25:07,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:25:07,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=C 2024-11-22T15:25:07,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:25:07,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122c148f1fafb2848abb975c947a4eb4265_33885ab322a31541d17da102b047512b is 50, key is test_row_0/A:col10/1732289105596/Put/seqid=0 2024-11-22T15:25:07,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742612_1788 (size=12454) 2024-11-22T15:25:07,282 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33885ab322a31541d17da102b047512b#A#compaction#669 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:25:07,282 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/ac8349f8fdbc406c865deac00bcc1a2e is 175, key is test_row_0/A:col10/1732289104454/Put/seqid=0 2024-11-22T15:25:07,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742613_1789 (size=32005) 2024-11-22T15:25:07,286 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/3785dbf790ad4941b6e04868695b69fd as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/3785dbf790ad4941b6e04868695b69fd 2024-11-22T15:25:07,289 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 33885ab322a31541d17da102b047512b/B of 33885ab322a31541d17da102b047512b into 3785dbf790ad4941b6e04868695b69fd(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:25:07,289 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33885ab322a31541d17da102b047512b: 2024-11-22T15:25:07,289 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b., storeName=33885ab322a31541d17da102b047512b/B, priority=13, startTime=1732289106869; duration=0sec 2024-11-22T15:25:07,289 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:25:07,289 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33885ab322a31541d17da102b047512b:B 2024-11-22T15:25:07,289 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:25:07,290 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:25:07,290 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): 33885ab322a31541d17da102b047512b/C is initiating minor compaction (all files) 2024-11-22T15:25:07,290 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33885ab322a31541d17da102b047512b/C in TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 
2024-11-22T15:25:07,290 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/eee2ba456aa246c2a516245db264685e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/9b2f5674bd504e1daab621f948d7d95c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/5f980da182c644e89b2c764d94789abd] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp, totalSize=36.7 K 2024-11-22T15:25:07,290 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting eee2ba456aa246c2a516245db264685e, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1732289102642 2024-11-22T15:25:07,290 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 9b2f5674bd504e1daab621f948d7d95c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1732289103196 2024-11-22T15:25:07,290 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 5f980da182c644e89b2c764d94789abd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=322, earliestPutTs=1732289104450 2024-11-22T15:25:07,295 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33885ab322a31541d17da102b047512b#C#compaction#672 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:25:07,295 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/c19dbbaee6be444b966abe661e42c587 is 50, key is test_row_0/C:col10/1732289104454/Put/seqid=0 2024-11-22T15:25:07,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742614_1790 (size=13051) 2024-11-22T15:25:07,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:07,430 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122c148f1fafb2848abb975c947a4eb4265_33885ab322a31541d17da102b047512b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122c148f1fafb2848abb975c947a4eb4265_33885ab322a31541d17da102b047512b 2024-11-22T15:25:07,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/393a87c94edb4dba9ca9ef347d46f32a, store: [table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:25:07,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/393a87c94edb4dba9ca9ef347d46f32a is 175, key is test_row_0/A:col10/1732289105596/Put/seqid=0 2024-11-22T15:25:07,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742615_1791 (size=31255) 2024-11-22T15:25:07,434 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=331, memsize=13.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/393a87c94edb4dba9ca9ef347d46f32a 2024-11-22T15:25:07,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/82ee357a73024fa897f74ae094da089c is 50, key is test_row_0/B:col10/1732289105596/Put/seqid=0 2024-11-22T15:25:07,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:42059 is added to blk_1073742616_1792 (size=12301) 2024-11-22T15:25:07,688 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/ac8349f8fdbc406c865deac00bcc1a2e as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/ac8349f8fdbc406c865deac00bcc1a2e 2024-11-22T15:25:07,691 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 33885ab322a31541d17da102b047512b/A of 33885ab322a31541d17da102b047512b into ac8349f8fdbc406c865deac00bcc1a2e(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:25:07,691 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33885ab322a31541d17da102b047512b: 2024-11-22T15:25:07,691 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b., storeName=33885ab322a31541d17da102b047512b/A, priority=13, startTime=1732289106868; duration=0sec 2024-11-22T15:25:07,691 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:25:07,691 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33885ab322a31541d17da102b047512b:A 2024-11-22T15:25:07,701 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/c19dbbaee6be444b966abe661e42c587 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/c19dbbaee6be444b966abe661e42c587 2024-11-22T15:25:07,704 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 33885ab322a31541d17da102b047512b/C of 33885ab322a31541d17da102b047512b into c19dbbaee6be444b966abe661e42c587(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:25:07,704 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33885ab322a31541d17da102b047512b: 2024-11-22T15:25:07,704 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b., storeName=33885ab322a31541d17da102b047512b/C, priority=13, startTime=1732289106870; duration=0sec 2024-11-22T15:25:07,704 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:25:07,704 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33885ab322a31541d17da102b047512b:C 2024-11-22T15:25:07,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on 33885ab322a31541d17da102b047512b 2024-11-22T15:25:07,722 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. as already flushing 2024-11-22T15:25:07,776 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:07,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289167774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:07,844 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/82ee357a73024fa897f74ae094da089c 2024-11-22T15:25:07,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/46bbee0e73db40d39cc3e17d8dec732d is 50, key is test_row_0/C:col10/1732289105596/Put/seqid=0 2024-11-22T15:25:07,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742617_1793 (size=12301) 2024-11-22T15:25:07,879 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:07,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289167877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:07,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-22T15:25:08,081 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-22T15:25:08,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36033 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40724 deadline: 1732289168080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 2024-11-22T15:25:08,088 DEBUG [Thread-3048 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3d9113f3 to 127.0.0.1:52970 2024-11-22T15:25:08,088 DEBUG [Thread-3048 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:25:08,089 DEBUG [Thread-3042 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6e757135 to 127.0.0.1:52970 2024-11-22T15:25:08,089 DEBUG [Thread-3042 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:25:08,089 DEBUG [Thread-3044 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7846cb78 to 127.0.0.1:52970 2024-11-22T15:25:08,089 DEBUG [Thread-3044 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:25:08,090 DEBUG [Thread-3050 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5bb75907 to 127.0.0.1:52970 2024-11-22T15:25:08,090 DEBUG [Thread-3050 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:25:08,090 DEBUG [Thread-3046 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5f1754bc to 127.0.0.1:52970 2024-11-22T15:25:08,091 DEBUG [Thread-3046 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:25:08,252 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/46bbee0e73db40d39cc3e17d8dec732d 2024-11-22T15:25:08,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/393a87c94edb4dba9ca9ef347d46f32a as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/393a87c94edb4dba9ca9ef347d46f32a 2024-11-22T15:25:08,267 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/393a87c94edb4dba9ca9ef347d46f32a, entries=150, sequenceid=331, filesize=30.5 K 2024-11-22T15:25:08,268 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/82ee357a73024fa897f74ae094da089c as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/82ee357a73024fa897f74ae094da089c 2024-11-22T15:25:08,272 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/82ee357a73024fa897f74ae094da089c, entries=150, sequenceid=331, filesize=12.0 K 2024-11-22T15:25:08,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/46bbee0e73db40d39cc3e17d8dec732d as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/46bbee0e73db40d39cc3e17d8dec732d 2024-11-22T15:25:08,275 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/46bbee0e73db40d39cc3e17d8dec732d, entries=150, sequenceid=331, filesize=12.0 K 2024-11-22T15:25:08,276 INFO [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for 33885ab322a31541d17da102b047512b in 1257ms, sequenceid=331, compaction requested=false 2024-11-22T15:25:08,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2538): Flush status journal for 33885ab322a31541d17da102b047512b: 2024-11-22T15:25:08,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 
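The RegionTooBusyException warnings above ("Over memstore limit=512.0 K") are the region server pushing back on writers while this flush drains the memstore. The client's built-in retry policy normally absorbs these, but a writer that manages its own retries could back off on that exception roughly as in the minimal, hypothetical sketch below. It assumes the standard HBase Java client; the table, row, family, qualifier, and retry parameters are illustrative, and depending on client retry settings the exception may instead surface wrapped in a retries-exhausted error rather than directly as shown here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);
                    break; // write accepted
                } catch (RegionTooBusyException e) {
                    // The region is throttling writes (memstore over its limit, as in the
                    // log above): back off and retry instead of failing the workload outright.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}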
2024-11-22T15:25:08,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/77927f992d0b:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=178 2024-11-22T15:25:08,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster(4106): Remote procedure done, pid=178 2024-11-22T15:25:08,278 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=178, resume processing ppid=177 2024-11-22T15:25:08,278 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=178, ppid=177, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4740 sec 2024-11-22T15:25:08,279 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=177, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees in 2.4770 sec 2024-11-22T15:25:08,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36033 {}] regionserver.HRegion(8581): Flush requested on 33885ab322a31541d17da102b047512b 2024-11-22T15:25:08,386 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 33885ab322a31541d17da102b047512b 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-22T15:25:08,386 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=A 2024-11-22T15:25:08,386 DEBUG [Thread-3035 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x103dfc6e to 127.0.0.1:52970 2024-11-22T15:25:08,386 DEBUG [Thread-3035 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:25:08,386 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:25:08,386 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=B 2024-11-22T15:25:08,386 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:25:08,386 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=C 2024-11-22T15:25:08,386 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:25:08,390 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411222d441f85b5224e6091998c7560d3a385_33885ab322a31541d17da102b047512b is 50, key is test_row_0/A:col10/1732289108385/Put/seqid=0 2024-11-22T15:25:08,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742618_1794 (size=12454) 2024-11-22T15:25:08,795 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:08,804 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411222d441f85b5224e6091998c7560d3a385_33885ab322a31541d17da102b047512b to 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411222d441f85b5224e6091998c7560d3a385_33885ab322a31541d17da102b047512b 2024-11-22T15:25:08,805 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/23265f7c72fb44be9d7b6fbf6c5ad31b, store: [table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:25:08,805 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/23265f7c72fb44be9d7b6fbf6c5ad31b is 175, key is test_row_0/A:col10/1732289108385/Put/seqid=0 2024-11-22T15:25:08,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742619_1795 (size=31255) 2024-11-22T15:25:09,210 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=362, memsize=55.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/23265f7c72fb44be9d7b6fbf6c5ad31b 2024-11-22T15:25:09,224 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/9b2c3816229a466cb6f84bcf3cd390e6 is 50, key is test_row_0/B:col10/1732289108385/Put/seqid=0 2024-11-22T15:25:09,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742620_1796 (size=12301) 2024-11-22T15:25:09,629 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=362 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/9b2c3816229a466cb6f84bcf3cd390e6 2024-11-22T15:25:09,642 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/37f9e49c7033404595e7e0f10579055b is 50, key is test_row_0/C:col10/1732289108385/Put/seqid=0 2024-11-22T15:25:09,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742621_1797 (size=12301) 2024-11-22T15:25:09,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-22T15:25:09,908 INFO [Thread-3041 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 177 completed 2024-11-22T15:25:10,047 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=362 (bloomFilter=true), 
to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/37f9e49c7033404595e7e0f10579055b 2024-11-22T15:25:10,054 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/23265f7c72fb44be9d7b6fbf6c5ad31b as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/23265f7c72fb44be9d7b6fbf6c5ad31b 2024-11-22T15:25:10,058 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/23265f7c72fb44be9d7b6fbf6c5ad31b, entries=150, sequenceid=362, filesize=30.5 K 2024-11-22T15:25:10,059 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/9b2c3816229a466cb6f84bcf3cd390e6 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/9b2c3816229a466cb6f84bcf3cd390e6 2024-11-22T15:25:10,061 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/9b2c3816229a466cb6f84bcf3cd390e6, entries=150, sequenceid=362, filesize=12.0 K 2024-11-22T15:25:10,061 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/37f9e49c7033404595e7e0f10579055b as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/37f9e49c7033404595e7e0f10579055b 2024-11-22T15:25:10,063 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/37f9e49c7033404595e7e0f10579055b, entries=150, sequenceid=362, filesize=12.0 K 2024-11-22T15:25:10,064 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=0 B/0 for 33885ab322a31541d17da102b047512b in 1678ms, sequenceid=362, compaction requested=true 2024-11-22T15:25:10,064 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 33885ab322a31541d17da102b047512b: 2024-11-22T15:25:10,064 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33885ab322a31541d17da102b047512b:A, priority=-2147483648, current under compaction store size is 1 2024-11-22T15:25:10,064 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:25:10,064 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:25:10,064 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33885ab322a31541d17da102b047512b:B, priority=-2147483648, current under compaction store size is 2 2024-11-22T15:25:10,064 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:25:10,064 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:25:10,064 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 33885ab322a31541d17da102b047512b:C, priority=-2147483648, current under compaction store size is 3 2024-11-22T15:25:10,064 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:25:10,065 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94515 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:25:10,065 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:25:10,065 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): 33885ab322a31541d17da102b047512b/B is initiating minor compaction (all files) 2024-11-22T15:25:10,065 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1540): 33885ab322a31541d17da102b047512b/A is initiating minor compaction (all files) 2024-11-22T15:25:10,065 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33885ab322a31541d17da102b047512b/A in TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:25:10,065 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33885ab322a31541d17da102b047512b/B in TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 
2024-11-22T15:25:10,065 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/ac8349f8fdbc406c865deac00bcc1a2e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/393a87c94edb4dba9ca9ef347d46f32a, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/23265f7c72fb44be9d7b6fbf6c5ad31b] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp, totalSize=92.3 K 2024-11-22T15:25:10,065 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/3785dbf790ad4941b6e04868695b69fd, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/82ee357a73024fa897f74ae094da089c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/9b2c3816229a466cb6f84bcf3cd390e6] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp, totalSize=36.8 K 2024-11-22T15:25:10,065 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:25:10,065 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 
files: [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/ac8349f8fdbc406c865deac00bcc1a2e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/393a87c94edb4dba9ca9ef347d46f32a, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/23265f7c72fb44be9d7b6fbf6c5ad31b] 2024-11-22T15:25:10,065 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 3785dbf790ad4941b6e04868695b69fd, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=322, earliestPutTs=1732289104450 2024-11-22T15:25:10,065 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting ac8349f8fdbc406c865deac00bcc1a2e, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=322, earliestPutTs=1732289104450 2024-11-22T15:25:10,065 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 82ee357a73024fa897f74ae094da089c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1732289105594 2024-11-22T15:25:10,065 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 393a87c94edb4dba9ca9ef347d46f32a, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1732289105594 2024-11-22T15:25:10,065 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 9b2c3816229a466cb6f84bcf3cd390e6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=362, earliestPutTs=1732289107766 2024-11-22T15:25:10,065 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] compactions.Compactor(224): Compacting 23265f7c72fb44be9d7b6fbf6c5ad31b, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=362, earliestPutTs=1732289107766 2024-11-22T15:25:10,069 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:25:10,070 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33885ab322a31541d17da102b047512b#B#compaction#678 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:25:10,070 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/eb3f6bc428b3425e8fe15a6247e0d1e9 is 50, key is test_row_0/B:col10/1732289108385/Put/seqid=0 2024-11-22T15:25:10,071 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241122590c3ad6027844b5aa2efe87885f9de2_33885ab322a31541d17da102b047512b store=[table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:25:10,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742622_1798 (size=13153) 2024-11-22T15:25:10,074 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241122590c3ad6027844b5aa2efe87885f9de2_33885ab322a31541d17da102b047512b, store=[table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:25:10,074 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241122590c3ad6027844b5aa2efe87885f9de2_33885ab322a31541d17da102b047512b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:25:10,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742623_1799 (size=4469) 2024-11-22T15:25:10,297 DEBUG [Thread-3039 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x60d631a3 to 127.0.0.1:52970 2024-11-22T15:25:10,298 DEBUG [Thread-3039 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:25:10,480 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33885ab322a31541d17da102b047512b#A#compaction#679 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:25:10,481 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/a08366d757e34a7fb768fcd341716d58 is 175, key is test_row_0/A:col10/1732289108385/Put/seqid=0 2024-11-22T15:25:10,486 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/eb3f6bc428b3425e8fe15a6247e0d1e9 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/eb3f6bc428b3425e8fe15a6247e0d1e9 2024-11-22T15:25:10,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742624_1800 (size=32107) 2024-11-22T15:25:10,490 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 33885ab322a31541d17da102b047512b/B of 33885ab322a31541d17da102b047512b into eb3f6bc428b3425e8fe15a6247e0d1e9(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:25:10,490 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33885ab322a31541d17da102b047512b: 2024-11-22T15:25:10,490 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b., storeName=33885ab322a31541d17da102b047512b/B, priority=13, startTime=1732289110064; duration=0sec 2024-11-22T15:25:10,490 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-22T15:25:10,490 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33885ab322a31541d17da102b047512b:B 2024-11-22T15:25:10,490 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T15:25:10,491 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T15:25:10,491 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1540): 33885ab322a31541d17da102b047512b/C is initiating minor compaction (all files) 2024-11-22T15:25:10,491 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 33885ab322a31541d17da102b047512b/C in TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 
2024-11-22T15:25:10,491 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/c19dbbaee6be444b966abe661e42c587, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/46bbee0e73db40d39cc3e17d8dec732d, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/37f9e49c7033404595e7e0f10579055b] into tmpdir=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp, totalSize=36.8 K 2024-11-22T15:25:10,491 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting c19dbbaee6be444b966abe661e42c587, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=322, earliestPutTs=1732289104450 2024-11-22T15:25:10,492 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 46bbee0e73db40d39cc3e17d8dec732d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1732289105594 2024-11-22T15:25:10,492 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] compactions.Compactor(224): Compacting 37f9e49c7033404595e7e0f10579055b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=362, earliestPutTs=1732289107766 2024-11-22T15:25:10,499 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 33885ab322a31541d17da102b047512b#C#compaction#680 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T15:25:10,499 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/25c59d9e63274f8a80082e80f995f11f is 50, key is test_row_0/C:col10/1732289108385/Put/seqid=0 2024-11-22T15:25:10,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742625_1801 (size=13153) 2024-11-22T15:25:10,813 DEBUG [Thread-3033 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x06094c70 to 127.0.0.1:52970 2024-11-22T15:25:10,813 DEBUG [Thread-3033 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:25:10,841 DEBUG [Thread-3031 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x537a66f8 to 127.0.0.1:52970 2024-11-22T15:25:10,841 DEBUG [Thread-3031 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:25:10,848 DEBUG [Thread-3037 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6e047c09 to 127.0.0.1:52970 2024-11-22T15:25:10,848 DEBUG [Thread-3037 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:25:10,848 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-22T15:25:10,848 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 69 2024-11-22T15:25:10,848 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 46 2024-11-22T15:25:10,848 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 94 2024-11-22T15:25:10,849 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 62 2024-11-22T15:25:10,849 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 16 2024-11-22T15:25:10,849 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-22T15:25:10,849 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5761 2024-11-22T15:25:10,849 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5854 2024-11-22T15:25:10,849 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5564 2024-11-22T15:25:10,849 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5806 2024-11-22T15:25:10,849 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5878 2024-11-22T15:25:10,849 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-22T15:25:10,849 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-22T15:25:10,849 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2209c520 to 127.0.0.1:52970 2024-11-22T15:25:10,849 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:25:10,850 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-22T15:25:10,851 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-22T15:25:10,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=179, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-22T15:25:10,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-22T15:25:10,856 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732289110855"}]},"ts":"1732289110855"} 2024-11-22T15:25:10,857 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-22T15:25:10,896 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/a08366d757e34a7fb768fcd341716d58 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/a08366d757e34a7fb768fcd341716d58 2024-11-22T15:25:10,902 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 33885ab322a31541d17da102b047512b/A of 33885ab322a31541d17da102b047512b into a08366d757e34a7fb768fcd341716d58(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T15:25:10,902 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33885ab322a31541d17da102b047512b: 2024-11-22T15:25:10,902 INFO [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b., storeName=33885ab322a31541d17da102b047512b/A, priority=13, startTime=1732289110064; duration=0sec 2024-11-22T15:25:10,903 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:25:10,903 DEBUG [RS:0;77927f992d0b:36033-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33885ab322a31541d17da102b047512b:A 2024-11-22T15:25:10,908 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/25c59d9e63274f8a80082e80f995f11f as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/25c59d9e63274f8a80082e80f995f11f 2024-11-22T15:25:10,913 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 33885ab322a31541d17da102b047512b/C of 33885ab322a31541d17da102b047512b into 25c59d9e63274f8a80082e80f995f11f(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T15:25:10,913 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 33885ab322a31541d17da102b047512b: 2024-11-22T15:25:10,913 INFO [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b., storeName=33885ab322a31541d17da102b047512b/C, priority=13, startTime=1732289110064; duration=0sec 2024-11-22T15:25:10,913 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T15:25:10,914 DEBUG [RS:0;77927f992d0b:36033-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 33885ab322a31541d17da102b047512b:C 2024-11-22T15:25:10,916 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-22T15:25:10,916 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=180, ppid=179, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-22T15:25:10,917 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=33885ab322a31541d17da102b047512b, UNASSIGN}] 2024-11-22T15:25:10,918 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=33885ab322a31541d17da102b047512b, UNASSIGN 
2024-11-22T15:25:10,919 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=181 updating hbase:meta row=33885ab322a31541d17da102b047512b, regionState=CLOSING, regionLocation=77927f992d0b,36033,1732288915809 2024-11-22T15:25:10,920 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-22T15:25:10,920 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=182, ppid=181, state=RUNNABLE; CloseRegionProcedure 33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809}] 2024-11-22T15:25:10,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-22T15:25:11,071 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 77927f992d0b,36033,1732288915809 2024-11-22T15:25:11,071 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] handler.UnassignRegionHandler(124): Close 33885ab322a31541d17da102b047512b 2024-11-22T15:25:11,072 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-22T15:25:11,072 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1681): Closing 33885ab322a31541d17da102b047512b, disabling compactions & flushes 2024-11-22T15:25:11,072 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:25:11,072 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:25:11,072 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. after waiting 0 ms 2024-11-22T15:25:11,072 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 
2024-11-22T15:25:11,072 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(2837): Flushing 33885ab322a31541d17da102b047512b 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-22T15:25:11,072 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=A 2024-11-22T15:25:11,072 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:25:11,072 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=B 2024-11-22T15:25:11,072 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:25:11,072 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 33885ab322a31541d17da102b047512b, store=C 2024-11-22T15:25:11,072 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-22T15:25:11,077 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411224bbf2189add94fc8ac79d60f238f0aa2_33885ab322a31541d17da102b047512b is 50, key is test_row_0/A:col10/1732289110838/Put/seqid=0 2024-11-22T15:25:11,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742626_1802 (size=12454) 2024-11-22T15:25:11,081 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T15:25:11,084 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411224bbf2189add94fc8ac79d60f238f0aa2_33885ab322a31541d17da102b047512b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411224bbf2189add94fc8ac79d60f238f0aa2_33885ab322a31541d17da102b047512b 2024-11-22T15:25:11,085 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/bbef5fba421a4dcf8447b71be88aa0ff, store: [table=TestAcidGuarantees family=A region=33885ab322a31541d17da102b047512b] 2024-11-22T15:25:11,085 DEBUG 
[RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/bbef5fba421a4dcf8447b71be88aa0ff is 175, key is test_row_0/A:col10/1732289110838/Put/seqid=0 2024-11-22T15:25:11,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742627_1803 (size=31255) 2024-11-22T15:25:11,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-22T15:25:11,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-22T15:25:11,489 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=372, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/bbef5fba421a4dcf8447b71be88aa0ff 2024-11-22T15:25:11,501 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/07b041d2ae43424392ebe99f8714f1b4 is 50, key is test_row_0/B:col10/1732289110838/Put/seqid=0 2024-11-22T15:25:11,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742628_1804 (size=12301) 2024-11-22T15:25:11,906 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/07b041d2ae43424392ebe99f8714f1b4 2024-11-22T15:25:11,918 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/8ea7b58003af4c6a928a761923b4c973 is 50, key is test_row_0/C:col10/1732289110838/Put/seqid=0 2024-11-22T15:25:11,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742629_1805 (size=12301) 2024-11-22T15:25:11,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-22T15:25:12,322 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/8ea7b58003af4c6a928a761923b4c973 
2024-11-22T15:25:12,325 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/A/bbef5fba421a4dcf8447b71be88aa0ff as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/bbef5fba421a4dcf8447b71be88aa0ff 2024-11-22T15:25:12,328 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/bbef5fba421a4dcf8447b71be88aa0ff, entries=150, sequenceid=372, filesize=30.5 K 2024-11-22T15:25:12,328 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/B/07b041d2ae43424392ebe99f8714f1b4 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/07b041d2ae43424392ebe99f8714f1b4 2024-11-22T15:25:12,331 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/07b041d2ae43424392ebe99f8714f1b4, entries=150, sequenceid=372, filesize=12.0 K 2024-11-22T15:25:12,331 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/.tmp/C/8ea7b58003af4c6a928a761923b4c973 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/8ea7b58003af4c6a928a761923b4c973 2024-11-22T15:25:12,334 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/8ea7b58003af4c6a928a761923b4c973, entries=150, sequenceid=372, filesize=12.0 K 2024-11-22T15:25:12,334 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 33885ab322a31541d17da102b047512b in 1262ms, sequenceid=372, compaction requested=false 2024-11-22T15:25:12,335 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/1d827f1db37040a4917d32e00228321f, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/8226ecd475c74c4ea15806cb4f885ea0, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/b8b58282becd438e9f6305186e142f3b, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/7059e3be1f96435ab0826040af2a4822, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/9e08f9d8c7864999b84ee58a6236bfab, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/79976962067f42dfa7e29fe798bf87c9, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/f183745a497446bbb7ca8d48cae03878, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/78907f1e2c124299b6bf7162930a6a6f, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/08e3e94c594d40f6800b117668ef45c2, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/872389ffff0645f898cdd74e0876a7fb, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/2c7dbdad8c7a44cbb4184a2c799182b7, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/cf5af5d41e824675bf6d5da3d7fd3d52, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/58275b4105ec46728d3e1f8b583eb541, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/ec18676388fe43cf91fcc3fd5f31aa40, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/8b06aa277e814e39a1821d1123159bce, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/f91d1f1f924b436d8411e0ddb85396df, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/ca5af8c360e542db8d35a668e048c31a, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/0236bf0de2c742b7875d997b7fe10228, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/a5dc9625a15a43238925077e94de5136, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/6859d05e60c54ec0a7e9b49cc5249c1e, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/127f83de7da44532aad18e7bff44781c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/48fe1b62bf8e4f7390c602ad4a47ef88, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/ac8349f8fdbc406c865deac00bcc1a2e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/393a87c94edb4dba9ca9ef347d46f32a, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/23265f7c72fb44be9d7b6fbf6c5ad31b] to archive 2024-11-22T15:25:12,335 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-22T15:25:12,336 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/1d827f1db37040a4917d32e00228321f to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/1d827f1db37040a4917d32e00228321f 2024-11-22T15:25:12,337 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/8226ecd475c74c4ea15806cb4f885ea0 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/8226ecd475c74c4ea15806cb4f885ea0 2024-11-22T15:25:12,338 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/b8b58282becd438e9f6305186e142f3b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/b8b58282becd438e9f6305186e142f3b 2024-11-22T15:25:12,339 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/7059e3be1f96435ab0826040af2a4822 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/7059e3be1f96435ab0826040af2a4822 2024-11-22T15:25:12,340 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/9e08f9d8c7864999b84ee58a6236bfab to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/9e08f9d8c7864999b84ee58a6236bfab 2024-11-22T15:25:12,341 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/79976962067f42dfa7e29fe798bf87c9 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/79976962067f42dfa7e29fe798bf87c9 2024-11-22T15:25:12,341 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/f183745a497446bbb7ca8d48cae03878 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/f183745a497446bbb7ca8d48cae03878 2024-11-22T15:25:12,342 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/78907f1e2c124299b6bf7162930a6a6f to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/78907f1e2c124299b6bf7162930a6a6f 2024-11-22T15:25:12,343 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/08e3e94c594d40f6800b117668ef45c2 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/08e3e94c594d40f6800b117668ef45c2 2024-11-22T15:25:12,343 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/872389ffff0645f898cdd74e0876a7fb to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/872389ffff0645f898cdd74e0876a7fb 2024-11-22T15:25:12,344 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/2c7dbdad8c7a44cbb4184a2c799182b7 to 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/2c7dbdad8c7a44cbb4184a2c799182b7 2024-11-22T15:25:12,345 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/cf5af5d41e824675bf6d5da3d7fd3d52 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/cf5af5d41e824675bf6d5da3d7fd3d52 2024-11-22T15:25:12,345 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/58275b4105ec46728d3e1f8b583eb541 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/58275b4105ec46728d3e1f8b583eb541 2024-11-22T15:25:12,346 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/ec18676388fe43cf91fcc3fd5f31aa40 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/ec18676388fe43cf91fcc3fd5f31aa40 2024-11-22T15:25:12,347 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/8b06aa277e814e39a1821d1123159bce to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/8b06aa277e814e39a1821d1123159bce 2024-11-22T15:25:12,348 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/f91d1f1f924b436d8411e0ddb85396df to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/f91d1f1f924b436d8411e0ddb85396df 2024-11-22T15:25:12,348 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/ca5af8c360e542db8d35a668e048c31a to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/ca5af8c360e542db8d35a668e048c31a 2024-11-22T15:25:12,349 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/0236bf0de2c742b7875d997b7fe10228 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/0236bf0de2c742b7875d997b7fe10228 2024-11-22T15:25:12,349 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/a5dc9625a15a43238925077e94de5136 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/a5dc9625a15a43238925077e94de5136 2024-11-22T15:25:12,350 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/6859d05e60c54ec0a7e9b49cc5249c1e to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/6859d05e60c54ec0a7e9b49cc5249c1e 2024-11-22T15:25:12,351 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/127f83de7da44532aad18e7bff44781c to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/127f83de7da44532aad18e7bff44781c 2024-11-22T15:25:12,351 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/48fe1b62bf8e4f7390c602ad4a47ef88 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/48fe1b62bf8e4f7390c602ad4a47ef88 2024-11-22T15:25:12,352 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/ac8349f8fdbc406c865deac00bcc1a2e to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/ac8349f8fdbc406c865deac00bcc1a2e 2024-11-22T15:25:12,353 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/393a87c94edb4dba9ca9ef347d46f32a to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/393a87c94edb4dba9ca9ef347d46f32a 2024-11-22T15:25:12,354 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/23265f7c72fb44be9d7b6fbf6c5ad31b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/23265f7c72fb44be9d7b6fbf6c5ad31b 2024-11-22T15:25:12,355 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/2f6e09938d444f33acf604efa5119b78, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/af302f7bcd0a40fb9a93bc9a875f8999, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/f337afb61553453995195f568c849e19, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/c8096ced47c241e69e0d8a67c3b2a0c1, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/0ed0f807e6714156afd2c03bbf8830e2, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/9b02fd509ec448e3a8d7df374979827e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/2f26571ab594431ebdfad5f592caba21, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/14a989f59e144d38b0c7f1c0c31c64cb, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/2535c78af7d547418457b1686dfa8796, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/da28587e3b1346ce87bc87044d7c244e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/f76e3b2076d64fd49e3fb742334d47ee, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/0a4b0b132eb14ae7a4ea67618fda11a1, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/502062493ce6457ebcfe89fd5cefe1aa, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/965d28c13e0448b6a4045bd7a98b218e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/102b615823f141ddbecd2c175c0c45f0, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/f018b8da8d244d7f92630c8b677db084, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/b61fd135b1b847f8a7cb1ab59714cad5, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/a57573e40e78407db5f7522c8dc4c8d0, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/e9914bdeb9c341dba66ea1b19812d2dc, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/ff3c70de7e064deab6cb79d75dcca0ed, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/4950f4b834af4e3f89cd1935e594a44e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/3785dbf790ad4941b6e04868695b69fd, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/ea4d4a2d1151439ea9c293991b408f3d, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/82ee357a73024fa897f74ae094da089c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/9b2c3816229a466cb6f84bcf3cd390e6] to archive 2024-11-22T15:25:12,355 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-22T15:25:12,356 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/2f6e09938d444f33acf604efa5119b78 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/2f6e09938d444f33acf604efa5119b78 2024-11-22T15:25:12,357 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/af302f7bcd0a40fb9a93bc9a875f8999 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/af302f7bcd0a40fb9a93bc9a875f8999 2024-11-22T15:25:12,358 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/f337afb61553453995195f568c849e19 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/f337afb61553453995195f568c849e19 2024-11-22T15:25:12,358 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/c8096ced47c241e69e0d8a67c3b2a0c1 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/c8096ced47c241e69e0d8a67c3b2a0c1 2024-11-22T15:25:12,359 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/0ed0f807e6714156afd2c03bbf8830e2 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/0ed0f807e6714156afd2c03bbf8830e2 2024-11-22T15:25:12,360 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/9b02fd509ec448e3a8d7df374979827e to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/9b02fd509ec448e3a8d7df374979827e 2024-11-22T15:25:12,360 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/2f26571ab594431ebdfad5f592caba21 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/2f26571ab594431ebdfad5f592caba21 2024-11-22T15:25:12,361 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/14a989f59e144d38b0c7f1c0c31c64cb to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/14a989f59e144d38b0c7f1c0c31c64cb 2024-11-22T15:25:12,362 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/2535c78af7d547418457b1686dfa8796 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/2535c78af7d547418457b1686dfa8796 2024-11-22T15:25:12,362 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/da28587e3b1346ce87bc87044d7c244e to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/da28587e3b1346ce87bc87044d7c244e 2024-11-22T15:25:12,363 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/f76e3b2076d64fd49e3fb742334d47ee to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/f76e3b2076d64fd49e3fb742334d47ee 2024-11-22T15:25:12,363 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/0a4b0b132eb14ae7a4ea67618fda11a1 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/0a4b0b132eb14ae7a4ea67618fda11a1 2024-11-22T15:25:12,364 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/502062493ce6457ebcfe89fd5cefe1aa to 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/502062493ce6457ebcfe89fd5cefe1aa 2024-11-22T15:25:12,365 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/965d28c13e0448b6a4045bd7a98b218e to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/965d28c13e0448b6a4045bd7a98b218e 2024-11-22T15:25:12,365 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/102b615823f141ddbecd2c175c0c45f0 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/102b615823f141ddbecd2c175c0c45f0 2024-11-22T15:25:12,366 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/f018b8da8d244d7f92630c8b677db084 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/f018b8da8d244d7f92630c8b677db084 2024-11-22T15:25:12,367 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/b61fd135b1b847f8a7cb1ab59714cad5 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/b61fd135b1b847f8a7cb1ab59714cad5 2024-11-22T15:25:12,367 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/a57573e40e78407db5f7522c8dc4c8d0 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/a57573e40e78407db5f7522c8dc4c8d0 2024-11-22T15:25:12,368 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/e9914bdeb9c341dba66ea1b19812d2dc to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/e9914bdeb9c341dba66ea1b19812d2dc 2024-11-22T15:25:12,368 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/ff3c70de7e064deab6cb79d75dcca0ed to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/ff3c70de7e064deab6cb79d75dcca0ed 2024-11-22T15:25:12,369 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/4950f4b834af4e3f89cd1935e594a44e to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/4950f4b834af4e3f89cd1935e594a44e 2024-11-22T15:25:12,369 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/3785dbf790ad4941b6e04868695b69fd to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/3785dbf790ad4941b6e04868695b69fd 2024-11-22T15:25:12,370 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/ea4d4a2d1151439ea9c293991b408f3d to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/ea4d4a2d1151439ea9c293991b408f3d 2024-11-22T15:25:12,371 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/82ee357a73024fa897f74ae094da089c to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/82ee357a73024fa897f74ae094da089c 2024-11-22T15:25:12,371 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/9b2c3816229a466cb6f84bcf3cd390e6 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/9b2c3816229a466cb6f84bcf3cd390e6 2024-11-22T15:25:12,372 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/e89cd632b1fa481ba8a29f6bdfd32deb, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/7fd1176917814c57afe4d2e7531c6933, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/4b408c027ba94ff1b796816392305c19, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/fdde57c11a5b44a0906807406844b9bf, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/56bdf4db8cf24d15b1ab634497e4295d, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/3bf50a1ccc2644d78ad67e1eca1d4e81, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/718e449caab648989a65cb1c9dc9de8c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/1538c6a6db0347a1aae5d2d2f1e32000, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/46e76416bdc1488785518b36fe61440e, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/de7d071def2a40cd9f6ada397bd9d9e5, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/050ff3cb395e4a4eaa1b99912f1f1ce1, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/768abbacde7e4074b21f474cd77ac626, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/2d722333ccb547d8aec912aaf78bef9c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/c2d27e9d14be491183e8bd4705b872d7, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/80e85f0ca6ac4e5ca52a72e9dc52fa5a, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/09868501b4864c78b385926418fd77e8, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/33f6cf1e1975462ab5391cc9bf55a8e6, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/d0648ce00f004e05a9d14de8af776c39, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/eee2ba456aa246c2a516245db264685e, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/10a556caceee4f24987c0c9b55aac680, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/9b2f5674bd504e1daab621f948d7d95c, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/c19dbbaee6be444b966abe661e42c587, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/5f980da182c644e89b2c764d94789abd, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/46bbee0e73db40d39cc3e17d8dec732d, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/37f9e49c7033404595e7e0f10579055b] to archive 2024-11-22T15:25:12,373 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-22T15:25:12,373 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/e89cd632b1fa481ba8a29f6bdfd32deb to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/e89cd632b1fa481ba8a29f6bdfd32deb 2024-11-22T15:25:12,374 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/7fd1176917814c57afe4d2e7531c6933 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/7fd1176917814c57afe4d2e7531c6933 2024-11-22T15:25:12,375 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/4b408c027ba94ff1b796816392305c19 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/4b408c027ba94ff1b796816392305c19 2024-11-22T15:25:12,375 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/fdde57c11a5b44a0906807406844b9bf to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/fdde57c11a5b44a0906807406844b9bf 2024-11-22T15:25:12,376 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/56bdf4db8cf24d15b1ab634497e4295d to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/56bdf4db8cf24d15b1ab634497e4295d 2024-11-22T15:25:12,377 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/3bf50a1ccc2644d78ad67e1eca1d4e81 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/3bf50a1ccc2644d78ad67e1eca1d4e81 2024-11-22T15:25:12,377 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/718e449caab648989a65cb1c9dc9de8c to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/718e449caab648989a65cb1c9dc9de8c 2024-11-22T15:25:12,378 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/1538c6a6db0347a1aae5d2d2f1e32000 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/1538c6a6db0347a1aae5d2d2f1e32000 2024-11-22T15:25:12,378 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/46e76416bdc1488785518b36fe61440e to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/46e76416bdc1488785518b36fe61440e 2024-11-22T15:25:12,379 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/de7d071def2a40cd9f6ada397bd9d9e5 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/de7d071def2a40cd9f6ada397bd9d9e5 2024-11-22T15:25:12,380 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/050ff3cb395e4a4eaa1b99912f1f1ce1 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/050ff3cb395e4a4eaa1b99912f1f1ce1 2024-11-22T15:25:12,380 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/768abbacde7e4074b21f474cd77ac626 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/768abbacde7e4074b21f474cd77ac626 2024-11-22T15:25:12,381 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/2d722333ccb547d8aec912aaf78bef9c to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/2d722333ccb547d8aec912aaf78bef9c 2024-11-22T15:25:12,382 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/c2d27e9d14be491183e8bd4705b872d7 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/c2d27e9d14be491183e8bd4705b872d7 2024-11-22T15:25:12,382 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/80e85f0ca6ac4e5ca52a72e9dc52fa5a to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/80e85f0ca6ac4e5ca52a72e9dc52fa5a 2024-11-22T15:25:12,383 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/09868501b4864c78b385926418fd77e8 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/09868501b4864c78b385926418fd77e8 2024-11-22T15:25:12,383 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/33f6cf1e1975462ab5391cc9bf55a8e6 to 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/33f6cf1e1975462ab5391cc9bf55a8e6 2024-11-22T15:25:12,384 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/d0648ce00f004e05a9d14de8af776c39 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/d0648ce00f004e05a9d14de8af776c39 2024-11-22T15:25:12,385 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/eee2ba456aa246c2a516245db264685e to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/eee2ba456aa246c2a516245db264685e 2024-11-22T15:25:12,385 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/10a556caceee4f24987c0c9b55aac680 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/10a556caceee4f24987c0c9b55aac680 2024-11-22T15:25:12,386 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/9b2f5674bd504e1daab621f948d7d95c to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/9b2f5674bd504e1daab621f948d7d95c 2024-11-22T15:25:12,387 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/c19dbbaee6be444b966abe661e42c587 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/c19dbbaee6be444b966abe661e42c587 2024-11-22T15:25:12,387 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/5f980da182c644e89b2c764d94789abd to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/5f980da182c644e89b2c764d94789abd 2024-11-22T15:25:12,388 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/46bbee0e73db40d39cc3e17d8dec732d to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/46bbee0e73db40d39cc3e17d8dec732d 2024-11-22T15:25:12,388 DEBUG [StoreCloser-TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/37f9e49c7033404595e7e0f10579055b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/37f9e49c7033404595e7e0f10579055b 2024-11-22T15:25:12,391 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/recovered.edits/375.seqid, newMaxSeqId=375, maxSeqId=4 2024-11-22T15:25:12,392 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b. 2024-11-22T15:25:12,392 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1635): Region close journal for 33885ab322a31541d17da102b047512b: 2024-11-22T15:25:12,392 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] handler.UnassignRegionHandler(170): Closed 33885ab322a31541d17da102b047512b 2024-11-22T15:25:12,393 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=181 updating hbase:meta row=33885ab322a31541d17da102b047512b, regionState=CLOSED 2024-11-22T15:25:12,394 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=182, resume processing ppid=181 2024-11-22T15:25:12,394 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=182, ppid=181, state=SUCCESS; CloseRegionProcedure 33885ab322a31541d17da102b047512b, server=77927f992d0b,36033,1732288915809 in 1.4730 sec 2024-11-22T15:25:12,395 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=181, resume processing ppid=180 2024-11-22T15:25:12,395 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=181, ppid=180, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=33885ab322a31541d17da102b047512b, UNASSIGN in 1.4770 sec 2024-11-22T15:25:12,396 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=180, resume processing ppid=179 2024-11-22T15:25:12,396 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=180, ppid=179, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.4790 sec 2024-11-22T15:25:12,397 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732289112397"}]},"ts":"1732289112397"} 
2024-11-22T15:25:12,398 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-22T15:25:12,407 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-22T15:25:12,409 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=179, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5570 sec 2024-11-22T15:25:12,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-22T15:25:12,961 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 179 completed 2024-11-22T15:25:12,962 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-22T15:25:12,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] procedure2.ProcedureExecutor(1098): Stored pid=183, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T15:25:12,964 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=183, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T15:25:12,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-11-22T15:25:12,965 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=183, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T15:25:12,968 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b 2024-11-22T15:25:12,972 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A, FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B, FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C, FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/recovered.edits] 2024-11-22T15:25:12,976 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/a08366d757e34a7fb768fcd341716d58 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/a08366d757e34a7fb768fcd341716d58 2024-11-22T15:25:12,977 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/bbef5fba421a4dcf8447b71be88aa0ff to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/A/bbef5fba421a4dcf8447b71be88aa0ff 2024-11-22T15:25:12,980 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/07b041d2ae43424392ebe99f8714f1b4 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/07b041d2ae43424392ebe99f8714f1b4 2024-11-22T15:25:12,982 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/eb3f6bc428b3425e8fe15a6247e0d1e9 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/B/eb3f6bc428b3425e8fe15a6247e0d1e9 2024-11-22T15:25:12,985 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/25c59d9e63274f8a80082e80f995f11f to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/25c59d9e63274f8a80082e80f995f11f 2024-11-22T15:25:12,987 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/8ea7b58003af4c6a928a761923b4c973 to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/C/8ea7b58003af4c6a928a761923b4c973 2024-11-22T15:25:12,992 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/recovered.edits/375.seqid to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b/recovered.edits/375.seqid 2024-11-22T15:25:12,992 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/default/TestAcidGuarantees/33885ab322a31541d17da102b047512b 2024-11-22T15:25:12,993 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-22T15:25:12,993 DEBUG [PEWorker-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-22T15:25:12,994 DEBUG [PEWorker-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-22T15:25:12,999 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112204c2e2f9b75a4f50a3731899edb48432_33885ab322a31541d17da102b047512b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112204c2e2f9b75a4f50a3731899edb48432_33885ab322a31541d17da102b047512b 2024-11-22T15:25:13,001 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122262d091cd8d6422b91076f461498eccb_33885ab322a31541d17da102b047512b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122262d091cd8d6422b91076f461498eccb_33885ab322a31541d17da102b047512b 2024-11-22T15:25:13,003 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122287f73bb84d149dc89a9600f20c50baf_33885ab322a31541d17da102b047512b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122287f73bb84d149dc89a9600f20c50baf_33885ab322a31541d17da102b047512b 2024-11-22T15:25:13,005 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411222d441f85b5224e6091998c7560d3a385_33885ab322a31541d17da102b047512b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411222d441f85b5224e6091998c7560d3a385_33885ab322a31541d17da102b047512b 2024-11-22T15:25:13,006 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122326ed48ce2484d5baa0bb1038bafa1e8_33885ab322a31541d17da102b047512b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122326ed48ce2484d5baa0bb1038bafa1e8_33885ab322a31541d17da102b047512b 2024-11-22T15:25:13,008 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411224bbf2189add94fc8ac79d60f238f0aa2_33885ab322a31541d17da102b047512b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411224bbf2189add94fc8ac79d60f238f0aa2_33885ab322a31541d17da102b047512b 2024-11-22T15:25:13,010 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411224e125532676345789f21a97a10e3f85d_33885ab322a31541d17da102b047512b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411224e125532676345789f21a97a10e3f85d_33885ab322a31541d17da102b047512b 2024-11-22T15:25:13,012 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122540c1730b3364538928ba85cfc49d1d6_33885ab322a31541d17da102b047512b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122540c1730b3364538928ba85cfc49d1d6_33885ab322a31541d17da102b047512b 2024-11-22T15:25:13,014 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411225d15c63d17324fb3b85cb1225c2cdcb1_33885ab322a31541d17da102b047512b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411225d15c63d17324fb3b85cb1225c2cdcb1_33885ab322a31541d17da102b047512b 2024-11-22T15:25:13,015 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411225d544a019bd046a4ad6428f5a2fa6ed1_33885ab322a31541d17da102b047512b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411225d544a019bd046a4ad6428f5a2fa6ed1_33885ab322a31541d17da102b047512b 2024-11-22T15:25:13,017 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411227606e87e042d4233867bced36c16bdf9_33885ab322a31541d17da102b047512b to 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411227606e87e042d4233867bced36c16bdf9_33885ab322a31541d17da102b047512b 2024-11-22T15:25:13,019 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122965496cce0684383ab18a5bbe7b9619a_33885ab322a31541d17da102b047512b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122965496cce0684383ab18a5bbe7b9619a_33885ab322a31541d17da102b047512b 2024-11-22T15:25:13,021 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411229fb341dc795f44d6b3255ba41c277d2c_33885ab322a31541d17da102b047512b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411229fb341dc795f44d6b3255ba41c277d2c_33885ab322a31541d17da102b047512b 2024-11-22T15:25:13,023 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122aceefd1f968e4be3a6497dde984dab5c_33885ab322a31541d17da102b047512b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122aceefd1f968e4be3a6497dde984dab5c_33885ab322a31541d17da102b047512b 2024-11-22T15:25:13,025 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122c148f1fafb2848abb975c947a4eb4265_33885ab322a31541d17da102b047512b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122c148f1fafb2848abb975c947a4eb4265_33885ab322a31541d17da102b047512b 2024-11-22T15:25:13,027 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122cc4c8f4c7a0742a2b617217abc0d4cc7_33885ab322a31541d17da102b047512b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122cc4c8f4c7a0742a2b617217abc0d4cc7_33885ab322a31541d17da102b047512b 2024-11-22T15:25:13,029 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122e286ec1702bd4c9a83011161d94ba833_33885ab322a31541d17da102b047512b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122e286ec1702bd4c9a83011161d94ba833_33885ab322a31541d17da102b047512b 2024-11-22T15:25:13,031 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122ef97f16746ff4fd4870588616cb4f36c_33885ab322a31541d17da102b047512b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122ef97f16746ff4fd4870588616cb4f36c_33885ab322a31541d17da102b047512b 2024-11-22T15:25:13,032 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122f5549da637e14b0599434b11a9dbbea1_33885ab322a31541d17da102b047512b to hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241122f5549da637e14b0599434b11a9dbbea1_33885ab322a31541d17da102b047512b 2024-11-22T15:25:13,032 DEBUG [PEWorker-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-22T15:25:13,033 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=183, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T15:25:13,035 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-22T15:25:13,036 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-22T15:25:13,037 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=183, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T15:25:13,037 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
2024-11-22T15:25:13,037 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732289113037"}]},"ts":"9223372036854775807"} 2024-11-22T15:25:13,038 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-22T15:25:13,038 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 33885ab322a31541d17da102b047512b, NAME => 'TestAcidGuarantees,,1732289084841.33885ab322a31541d17da102b047512b.', STARTKEY => '', ENDKEY => ''}] 2024-11-22T15:25:13,038 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-22T15:25:13,038 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732289113038"}]},"ts":"9223372036854775807"} 2024-11-22T15:25:13,039 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-22T15:25:13,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-11-22T15:25:13,075 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=183, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-22T15:25:13,076 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=183, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 113 msec 2024-11-22T15:25:13,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38317 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-11-22T15:25:13,267 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 183 completed 2024-11-22T15:25:13,283 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=241 (was 237) - Thread LEAK? -, OpenFileDescriptor=457 (was 449) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=623 (was 691), ProcessCount=11 (was 11), AvailableMemoryMB=5176 (was 5219) 2024-11-22T15:25:13,283 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-11-22T15:25:13,283 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-22T15:25:13,283 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x76523d14 to 127.0.0.1:52970 2024-11-22T15:25:13,283 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:25:13,284 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-22T15:25:13,284 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1657766203, stopped=false 2024-11-22T15:25:13,284 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=77927f992d0b,38317,1732288914436 2024-11-22T15:25:13,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36033-0x101646cc1b90001, quorum=127.0.0.1:52970, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T15:25:13,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38317-0x101646cc1b90000, quorum=127.0.0.1:52970, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T15:25:13,291 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-11-22T15:25:13,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36033-0x101646cc1b90001, quorum=127.0.0.1:52970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T15:25:13,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38317-0x101646cc1b90000, quorum=127.0.0.1:52970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T15:25:13,292 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:25:13,292 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38317-0x101646cc1b90000, quorum=127.0.0.1:52970, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T15:25:13,292 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '77927f992d0b,36033,1732288915809' ***** 2024-11-22T15:25:13,292 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-11-22T15:25:13,292 INFO [RS:0;77927f992d0b:36033 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-22T15:25:13,292 INFO [RS:0;77927f992d0b:36033 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-22T15:25:13,292 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36033-0x101646cc1b90001, quorum=127.0.0.1:52970, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T15:25:13,292 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-11-22T15:25:13,293 INFO [RS:0;77927f992d0b:36033 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-22T15:25:13,293 INFO [RS:0;77927f992d0b:36033 {}] regionserver.HRegionServer(3579): Received CLOSE for e7d35e0ae1b576a64b6f8105b0d3681e 2024-11-22T15:25:13,293 INFO [RS:0;77927f992d0b:36033 {}] regionserver.HRegionServer(1224): stopping server 77927f992d0b,36033,1732288915809 2024-11-22T15:25:13,293 DEBUG [RS:0;77927f992d0b:36033 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T15:25:13,293 INFO [RS:0;77927f992d0b:36033 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-22T15:25:13,293 INFO [RS:0;77927f992d0b:36033 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-22T15:25:13,293 INFO [RS:0;77927f992d0b:36033 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-22T15:25:13,293 INFO [RS:0;77927f992d0b:36033 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-11-22T15:25:13,294 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing e7d35e0ae1b576a64b6f8105b0d3681e, disabling compactions & flushes 2024-11-22T15:25:13,294 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1732288919886.e7d35e0ae1b576a64b6f8105b0d3681e. 2024-11-22T15:25:13,294 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1732288919886.e7d35e0ae1b576a64b6f8105b0d3681e. 2024-11-22T15:25:13,294 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1732288919886.e7d35e0ae1b576a64b6f8105b0d3681e. after waiting 0 ms 2024-11-22T15:25:13,294 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1732288919886.e7d35e0ae1b576a64b6f8105b0d3681e. 
2024-11-22T15:25:13,294 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing e7d35e0ae1b576a64b6f8105b0d3681e 1/1 column families, dataSize=78 B heapSize=488 B 2024-11-22T15:25:13,294 INFO [RS:0;77927f992d0b:36033 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-11-22T15:25:13,294 DEBUG [RS:0;77927f992d0b:36033 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740, e7d35e0ae1b576a64b6f8105b0d3681e=hbase:namespace,,1732288919886.e7d35e0ae1b576a64b6f8105b0d3681e.} 2024-11-22T15:25:13,294 DEBUG [RS_CLOSE_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-22T15:25:13,294 INFO [RS_CLOSE_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-22T15:25:13,294 DEBUG [RS_CLOSE_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-22T15:25:13,294 DEBUG [RS_CLOSE_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T15:25:13,294 DEBUG [RS_CLOSE_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T15:25:13,294 INFO [RS_CLOSE_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB 2024-11-22T15:25:13,297 DEBUG [RS:0;77927f992d0b:36033 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, e7d35e0ae1b576a64b6f8105b0d3681e 2024-11-22T15:25:13,310 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/hbase/namespace/e7d35e0ae1b576a64b6f8105b0d3681e/.tmp/info/047940e67ac04953b28498e6f576fae3 is 45, key is default/info:d/1732288921076/Put/seqid=0 2024-11-22T15:25:13,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742630_1806 (size=5037) 2024-11-22T15:25:13,317 DEBUG [RS_CLOSE_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/hbase/meta/1588230740/.tmp/info/93f2dcd663db46b0a6728b4e5b56caad is 143, key is hbase:namespace,,1732288919886.e7d35e0ae1b576a64b6f8105b0d3681e./info:regioninfo/1732288920942/Put/seqid=0 2024-11-22T15:25:13,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742631_1807 (size=7725) 2024-11-22T15:25:13,365 INFO [regionserver/77927f992d0b:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T15:25:13,498 DEBUG [RS:0;77927f992d0b:36033 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, e7d35e0ae1b576a64b6f8105b0d3681e 2024-11-22T15:25:13,609 INFO [regionserver/77927f992d0b:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-22T15:25:13,609 INFO 
[regionserver/77927f992d0b:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-22T15:25:13,698 DEBUG [RS:0;77927f992d0b:36033 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, e7d35e0ae1b576a64b6f8105b0d3681e 2024-11-22T15:25:13,715 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/hbase/namespace/e7d35e0ae1b576a64b6f8105b0d3681e/.tmp/info/047940e67ac04953b28498e6f576fae3 2024-11-22T15:25:13,721 INFO [RS_CLOSE_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/hbase/meta/1588230740/.tmp/info/93f2dcd663db46b0a6728b4e5b56caad 2024-11-22T15:25:13,725 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/hbase/namespace/e7d35e0ae1b576a64b6f8105b0d3681e/.tmp/info/047940e67ac04953b28498e6f576fae3 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/hbase/namespace/e7d35e0ae1b576a64b6f8105b0d3681e/info/047940e67ac04953b28498e6f576fae3 2024-11-22T15:25:13,729 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/hbase/namespace/e7d35e0ae1b576a64b6f8105b0d3681e/info/047940e67ac04953b28498e6f576fae3, entries=2, sequenceid=6, filesize=4.9 K 2024-11-22T15:25:13,730 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for e7d35e0ae1b576a64b6f8105b0d3681e in 436ms, sequenceid=6, compaction requested=false 2024-11-22T15:25:13,734 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/hbase/namespace/e7d35e0ae1b576a64b6f8105b0d3681e/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-22T15:25:13,734 INFO [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1732288919886.e7d35e0ae1b576a64b6f8105b0d3681e. 2024-11-22T15:25:13,734 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for e7d35e0ae1b576a64b6f8105b0d3681e: 2024-11-22T15:25:13,735 DEBUG [RS_CLOSE_REGION-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1732288919886.e7d35e0ae1b576a64b6f8105b0d3681e. 
2024-11-22T15:25:13,745 DEBUG [RS_CLOSE_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/hbase/meta/1588230740/.tmp/rep_barrier/bd1e7e12344b4f499543760f45d6cf78 is 102, key is TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f./rep_barrier:/1732288955149/DeleteFamily/seqid=0 2024-11-22T15:25:13,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742632_1808 (size=6025) 2024-11-22T15:25:13,898 DEBUG [RS:0;77927f992d0b:36033 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-22T15:25:14,099 DEBUG [RS:0;77927f992d0b:36033 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-22T15:25:14,149 INFO [RS_CLOSE_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/hbase/meta/1588230740/.tmp/rep_barrier/bd1e7e12344b4f499543760f45d6cf78 2024-11-22T15:25:14,168 DEBUG [RS_CLOSE_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/hbase/meta/1588230740/.tmp/table/c445be238a5d455ca1c121d67e074566 is 96, key is TestAcidGuarantees,,1732288921496.ed6f777bba2efed5f759348895e3133f./table:/1732288955149/DeleteFamily/seqid=0 2024-11-22T15:25:14,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742633_1809 (size=5942) 2024-11-22T15:25:14,299 INFO [RS:0;77927f992d0b:36033 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close 2024-11-22T15:25:14,299 DEBUG [RS:0;77927f992d0b:36033 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-22T15:25:14,299 DEBUG [RS:0;77927f992d0b:36033 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-22T15:25:14,499 DEBUG [RS:0;77927f992d0b:36033 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-22T15:25:14,572 INFO [RS_CLOSE_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/hbase/meta/1588230740/.tmp/table/c445be238a5d455ca1c121d67e074566 2024-11-22T15:25:14,581 DEBUG [RS_CLOSE_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/hbase/meta/1588230740/.tmp/info/93f2dcd663db46b0a6728b4e5b56caad as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/hbase/meta/1588230740/info/93f2dcd663db46b0a6728b4e5b56caad 2024-11-22T15:25:14,584 INFO [RS_CLOSE_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/hbase/meta/1588230740/info/93f2dcd663db46b0a6728b4e5b56caad, entries=22, sequenceid=93, filesize=7.5 K 2024-11-22T15:25:14,585 DEBUG 
[RS_CLOSE_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/hbase/meta/1588230740/.tmp/rep_barrier/bd1e7e12344b4f499543760f45d6cf78 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/hbase/meta/1588230740/rep_barrier/bd1e7e12344b4f499543760f45d6cf78 2024-11-22T15:25:14,588 INFO [RS_CLOSE_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/hbase/meta/1588230740/rep_barrier/bd1e7e12344b4f499543760f45d6cf78, entries=6, sequenceid=93, filesize=5.9 K 2024-11-22T15:25:14,588 DEBUG [RS_CLOSE_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/hbase/meta/1588230740/.tmp/table/c445be238a5d455ca1c121d67e074566 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/hbase/meta/1588230740/table/c445be238a5d455ca1c121d67e074566 2024-11-22T15:25:14,592 INFO [RS_CLOSE_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/hbase/meta/1588230740/table/c445be238a5d455ca1c121d67e074566, entries=9, sequenceid=93, filesize=5.8 K 2024-11-22T15:25:14,593 INFO [RS_CLOSE_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 1299ms, sequenceid=93, compaction requested=false 2024-11-22T15:25:14,597 DEBUG [RS_CLOSE_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1 2024-11-22T15:25:14,597 DEBUG [RS_CLOSE_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T15:25:14,598 INFO [RS_CLOSE_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-22T15:25:14,598 DEBUG [RS_CLOSE_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-22T15:25:14,598 DEBUG [RS_CLOSE_META-regionserver/77927f992d0b:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-22T15:25:14,700 INFO [RS:0;77927f992d0b:36033 {}] regionserver.HRegionServer(1250): stopping server 77927f992d0b,36033,1732288915809; all regions closed. 
2024-11-22T15:25:14,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741834_1010 (size=26050)
2024-11-22T15:25:14,706 DEBUG [RS:0;77927f992d0b:36033 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/oldWALs
2024-11-22T15:25:14,706 INFO [RS:0;77927f992d0b:36033 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 77927f992d0b%2C36033%2C1732288915809.meta:.meta(num 1732288919254)
2024-11-22T15:25:14,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741833_1009 (size=18631704)
2024-11-22T15:25:14,710 DEBUG [RS:0;77927f992d0b:36033 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/oldWALs
2024-11-22T15:25:14,710 INFO [RS:0;77927f992d0b:36033 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 77927f992d0b%2C36033%2C1732288915809:(num 1732288918783)
2024-11-22T15:25:14,710 DEBUG [RS:0;77927f992d0b:36033 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-22T15:25:14,711 INFO [RS:0;77927f992d0b:36033 {}] regionserver.LeaseManager(133): Closed leases
2024-11-22T15:25:14,711 INFO [RS:0;77927f992d0b:36033 {}] hbase.ChoreService(370): Chore service for: regionserver/77927f992d0b:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown
2024-11-22T15:25:14,711 INFO [regionserver/77927f992d0b:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-11-22T15:25:14,712 INFO [RS:0;77927f992d0b:36033 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:36033
2024-11-22T15:25:14,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38317-0x101646cc1b90000, quorum=127.0.0.1:52970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-22T15:25:14,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36033-0x101646cc1b90001, quorum=127.0.0.1:52970, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/77927f992d0b,36033,1732288915809
2024-11-22T15:25:14,750 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher.
java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$357/0x00007f2c208f3530@1c67b454 rejected from java.util.concurrent.ThreadPoolExecutor@596529bc[Shutting down, pool size = 1, active threads = 0, queued tasks = 0, completed tasks = 15]
    at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1360) ~[?:?]
    at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?]
    at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4]
    at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4]
2024-11-22T15:25:14,751 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [77927f992d0b,36033,1732288915809]
2024-11-22T15:25:14,751 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 77927f992d0b,36033,1732288915809; numProcessing=1
2024-11-22T15:25:14,766 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/77927f992d0b,36033,1732288915809 already deleted, retry=false
2024-11-22T15:25:14,766 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 77927f992d0b,36033,1732288915809 expired; onlineServers=0
2024-11-22T15:25:14,766 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '77927f992d0b,38317,1732288914436' *****
2024-11-22T15:25:14,766 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0
2024-11-22T15:25:14,767 DEBUG [M:0;77927f992d0b:38317 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c323138, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=77927f992d0b/172.17.0.2:0
2024-11-22T15:25:14,767 INFO [M:0;77927f992d0b:38317 {}] regionserver.HRegionServer(1224): stopping server 77927f992d0b,38317,1732288914436
2024-11-22T15:25:14,767 INFO [M:0;77927f992d0b:38317 {}] regionserver.HRegionServer(1250): stopping server 77927f992d0b,38317,1732288914436; all regions closed.
2024-11-22T15:25:14,767 DEBUG [M:0;77927f992d0b:38317 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-22T15:25:14,767 DEBUG [M:0;77927f992d0b:38317 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-11-22T15:25:14,768 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-11-22T15:25:14,768 DEBUG [M:0;77927f992d0b:38317 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-11-22T15:25:14,768 DEBUG [master/77927f992d0b:0:becomeActiveMaster-HFileCleaner.large.0-1732288918246 {}] cleaner.HFileCleaner(306): Exit Thread[master/77927f992d0b:0:becomeActiveMaster-HFileCleaner.large.0-1732288918246,5,FailOnTimeoutGroup]
2024-11-22T15:25:14,768 DEBUG [master/77927f992d0b:0:becomeActiveMaster-HFileCleaner.small.0-1732288918251 {}] cleaner.HFileCleaner(306): Exit Thread[master/77927f992d0b:0:becomeActiveMaster-HFileCleaner.small.0-1732288918251,5,FailOnTimeoutGroup]
2024-11-22T15:25:14,768 INFO [M:0;77927f992d0b:38317 {}] hbase.ChoreService(370): Chore service for: master/77927f992d0b:0 had [] on shutdown
2024-11-22T15:25:14,769 DEBUG [M:0;77927f992d0b:38317 {}] master.HMaster(1733): Stopping service threads
2024-11-22T15:25:14,769 INFO [M:0;77927f992d0b:38317 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-11-22T15:25:14,769 ERROR [M:0;77927f992d0b:38317 {}] procedure2.ProcedureExecutor(722): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT
java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10]
    Thread[IPC Client (59733779) connection to localhost/127.0.0.1:34007 from jenkins,5,PEWorkerGroup]
    Thread[IPC Parameter Sending Thread for localhost/127.0.0.1:34007,5,PEWorkerGroup]
    Thread[HFileArchiver-6,5,PEWorkerGroup]
2024-11-22T15:25:14,771 INFO [M:0;77927f992d0b:38317 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-11-22T15:25:14,771 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-11-22T15:25:14,774 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38317-0x101646cc1b90000, quorum=127.0.0.1:52970, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-11-22T15:25:14,774 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38317-0x101646cc1b90000, quorum=127.0.0.1:52970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-22T15:25:14,775 DEBUG [M:0;77927f992d0b:38317 {}] zookeeper.ZKUtil(347): master:38317-0x101646cc1b90000, quorum=127.0.0.1:52970, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-11-22T15:25:14,775 WARN [M:0;77927f992d0b:38317 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-11-22T15:25:14,775 INFO [M:0;77927f992d0b:38317 {}] assignment.AssignmentManager(391): Stopping assignment manager
2024-11-22T15:25:14,775 INFO [M:0;77927f992d0b:38317 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-11-22T15:25:14,775 DEBUG [M:0;77927f992d0b:38317 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-22T15:25:14,776 INFO [M:0;77927f992d0b:38317 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-22T15:25:14,776 DEBUG [M:0;77927f992d0b:38317 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-22T15:25:14,776 DEBUG [M:0;77927f992d0b:38317 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-22T15:25:14,776 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38317-0x101646cc1b90000, quorum=127.0.0.1:52970, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-22T15:25:14,776 DEBUG [M:0;77927f992d0b:38317 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-22T15:25:14,776 INFO [M:0;77927f992d0b:38317 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=811.93 KB heapSize=1001.38 KB
2024-11-22T15:25:14,790 DEBUG [M:0;77927f992d0b:38317 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1fc8b54934924506846171c5dc011c07 is 82, key is hbase:meta,,1/info:regioninfo/1732288919621/Put/seqid=0
2024-11-22T15:25:14,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742634_1810 (size=5672)
2024-11-22T15:25:14,858 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36033-0x101646cc1b90001, quorum=127.0.0.1:52970, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-22T15:25:14,858 INFO [RS:0;77927f992d0b:36033 {}] regionserver.HRegionServer(1307): Exiting; stopping=77927f992d0b,36033,1732288915809; zookeeper connection closed.
2024-11-22T15:25:14,858 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36033-0x101646cc1b90001, quorum=127.0.0.1:52970, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-22T15:25:14,859 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@16fa1848 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@16fa1848
2024-11-22T15:25:14,859 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-11-22T15:25:15,108 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-22T15:25:15,108 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-22T15:25:15,110 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace
2024-11-22T15:25:15,111 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees
2024-11-22T15:25:15,194 INFO [M:0;77927f992d0b:38317 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=2346 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1fc8b54934924506846171c5dc011c07
2024-11-22T15:25:15,213 DEBUG [M:0;77927f992d0b:38317 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c5d8253af9554a0d90057a1654c3772a is 2284, key is \x00\x00\x00\x00\x00\x00\x00a/proc:d/1732289026581/Put/seqid=0
2024-11-22T15:25:15,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742635_1811 (size=45824)
2024-11-22T15:25:15,617 INFO [M:0;77927f992d0b:38317 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=811.37 KB at sequenceid=2346 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c5d8253af9554a0d90057a1654c3772a
2024-11-22T15:25:15,620 INFO [M:0;77927f992d0b:38317 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c5d8253af9554a0d90057a1654c3772a
2024-11-22T15:25:15,633 DEBUG [M:0;77927f992d0b:38317 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7e6d25a4cf394604b131b70e7c5c73e7 is 69, key is 77927f992d0b,36033,1732288915809/rs:state/1732288918289/Put/seqid=0
2024-11-22T15:25:15,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073742636_1812 (size=5156)
2024-11-22T15:25:16,037 INFO [M:0;77927f992d0b:38317 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=2346 (bloomFilter=true), to=hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7e6d25a4cf394604b131b70e7c5c73e7
2024-11-22T15:25:16,045 DEBUG [M:0;77927f992d0b:38317 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1fc8b54934924506846171c5dc011c07 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1fc8b54934924506846171c5dc011c07
2024-11-22T15:25:16,049 INFO [M:0;77927f992d0b:38317 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1fc8b54934924506846171c5dc011c07, entries=8, sequenceid=2346, filesize=5.5 K
2024-11-22T15:25:16,050 DEBUG [M:0;77927f992d0b:38317 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c5d8253af9554a0d90057a1654c3772a as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c5d8253af9554a0d90057a1654c3772a
2024-11-22T15:25:16,053 INFO [M:0;77927f992d0b:38317 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c5d8253af9554a0d90057a1654c3772a
2024-11-22T15:25:16,053 INFO [M:0;77927f992d0b:38317 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c5d8253af9554a0d90057a1654c3772a, entries=183, sequenceid=2346, filesize=44.8 K
2024-11-22T15:25:16,054 DEBUG [M:0;77927f992d0b:38317 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7e6d25a4cf394604b131b70e7c5c73e7 as hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7e6d25a4cf394604b131b70e7c5c73e7
2024-11-22T15:25:16,057 INFO [M:0;77927f992d0b:38317 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34007/user/jenkins/test-data/4736ff95-9d8c-9b3f-7602-60d900713690/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7e6d25a4cf394604b131b70e7c5c73e7, entries=1, sequenceid=2346, filesize=5.0 K
2024-11-22T15:25:16,058 INFO [M:0;77927f992d0b:38317 {}] regionserver.HRegion(3040): Finished flush of dataSize ~811.93 KB/831413, heapSize ~1001.09 KB/1025112, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1282ms, sequenceid=2346, compaction requested=false
2024-11-22T15:25:16,059 INFO [M:0;77927f992d0b:38317 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-22T15:25:16,059 DEBUG [M:0;77927f992d0b:38317 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-11-22T15:25:16,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42059 is added to blk_1073741830_1006 (size=985158)
2024-11-22T15:25:16,061 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-11-22T15:25:16,061 INFO [M:0;77927f992d0b:38317 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down.
2024-11-22T15:25:16,061 INFO [M:0;77927f992d0b:38317 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:38317
2024-11-22T15:25:16,099 DEBUG [M:0;77927f992d0b:38317 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/77927f992d0b,38317,1732288914436 already deleted, retry=false
2024-11-22T15:25:16,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38317-0x101646cc1b90000, quorum=127.0.0.1:52970, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-22T15:25:16,208 INFO [M:0;77927f992d0b:38317 {}] regionserver.HRegionServer(1307): Exiting; stopping=77927f992d0b,38317,1732288914436; zookeeper connection closed.
2024-11-22T15:25:16,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38317-0x101646cc1b90000, quorum=127.0.0.1:52970, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-22T15:25:16,217 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1f79ec76{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-22T15:25:16,220 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-22T15:25:16,220 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-22T15:25:16,220 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-22T15:25:16,220 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/287c6e19-5b45-afa6-c064-175d83a47581/hadoop.log.dir/,STOPPED}
2024-11-22T15:25:16,223 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-22T15:25:16,224 WARN [BP-1976188293-172.17.0.2-1732288907727 heartbeating to localhost/127.0.0.1:34007 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-22T15:25:16,224 WARN [BP-1976188293-172.17.0.2-1732288907727 heartbeating to localhost/127.0.0.1:34007 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1976188293-172.17.0.2-1732288907727 (Datanode Uuid 38ee6b56-866f-4b24-8b0c-ca8404a19b80) service to localhost/127.0.0.1:34007
2024-11-22T15:25:16,224 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-22T15:25:16,226 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/287c6e19-5b45-afa6-c064-175d83a47581/cluster_7c33ed0c-a730-aa33-6163-a3da973a22a2/dfs/data/data1/current/BP-1976188293-172.17.0.2-1732288907727 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-22T15:25:16,226 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/287c6e19-5b45-afa6-c064-175d83a47581/cluster_7c33ed0c-a730-aa33-6163-a3da973a22a2/dfs/data/data2/current/BP-1976188293-172.17.0.2-1732288907727 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-22T15:25:16,227 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-22T15:25:16,235 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@b03fcff{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-22T15:25:16,236 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-22T15:25:16,236 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-22T15:25:16,236 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-22T15:25:16,236 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/287c6e19-5b45-afa6-c064-175d83a47581/hadoop.log.dir/,STOPPED}
2024-11-22T15:25:16,260 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers
2024-11-22T15:25:16,411 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down